diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 3e6708180..ac1c24d10 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -2cee201b2e8d656f7306b2f9ec98edfa721e9829 \ No newline at end of file +a8f547d3728fba835fbdda301e846829c5cbbef5 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index a0bfc0940..3bfcfa2e1 100755 --- a/.gitattributes +++ b/.gitattributes @@ -1,12 +1,14 @@ databricks/sdk/__init__.py linguist-generated=true databricks/sdk/errors/overrides.py linguist-generated=true databricks/sdk/errors/platform.py linguist-generated=true +databricks/sdk/service/aibuilder.py linguist-generated=true databricks/sdk/service/apps.py linguist-generated=true databricks/sdk/service/billing.py linguist-generated=true databricks/sdk/service/catalog.py linguist-generated=true databricks/sdk/service/cleanrooms.py linguist-generated=true databricks/sdk/service/compute.py linguist-generated=true databricks/sdk/service/dashboards.py linguist-generated=true +databricks/sdk/service/database.py linguist-generated=true databricks/sdk/service/files.py linguist-generated=true databricks/sdk/service/iam.py linguist-generated=true databricks/sdk/service/jobs.py linguist-generated=true @@ -15,6 +17,7 @@ databricks/sdk/service/ml.py linguist-generated=true databricks/sdk/service/oauth2.py linguist-generated=true databricks/sdk/service/pipelines.py linguist-generated=true databricks/sdk/service/provisioning.py linguist-generated=true +databricks/sdk/service/qualitymonitorv2.py linguist-generated=true databricks/sdk/service/serving.py linguist-generated=true databricks/sdk/service/settings.py linguist-generated=true databricks/sdk/service/sharing.py linguist-generated=true diff --git a/.github/workflows/tagging.yml b/.github/workflows/tagging.yml index 558f2993a..d4486fb51 100644 --- a/.github/workflows/tagging.yml +++ b/.github/workflows/tagging.yml @@ -21,7 +21,7 @@ jobs: steps: - name: Generate GitHub App Token id: generate-token - uses: actions/create-github-app-token@v1 + uses: actions/create-github-app-token@v2 with: app-id: ${{ secrets.DECO_SDK_TAGGING_APP_ID }} private-key: ${{ secrets.DECO_SDK_TAGGING_PRIVATE_KEY }} @@ -49,4 +49,3 @@ jobs: GITHUB_REPOSITORY: ${{ github.repository }} run: | python tagging.py - diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 8ac78ce49..ad8511e48 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,3 +11,77 @@ ### Internal Changes ### API Changes +* Added `databricks.sdk.service.aibuilder`, `databricks.sdk.service.database` and `databricks.sdk.service.qualitymonitorv2` packages. +* Added [w.custom_llms](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/aibuilder/custom_llms.html) workspace-level service. +* Added [w.dashboard_email_subscriptions](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/settings/settings/dashboard_email_subscriptions.html) workspace-level service and [w.sql_results_download](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/settings/settings/sql_results_download.html) workspace-level service. +* Added [w.database](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/database/database.html) workspace-level service. +* Added [w.quality_monitor_v2](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/qualitymonitorv2/quality_monitor_v2.html) workspace-level service. 
+* Added `update_private_endpoint_rule()` method for [a.network_connectivity](https://databricks-sdk-py.readthedocs.io/en/latest/account/settings/network_connectivity.html) account-level service. +* Added `list_spaces()` method for [w.genie](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/dashboards/genie.html) workspace-level service. +* Added `page_token` field for `databricks.sdk.service.billing.ListLogDeliveryRequest`. +* Added `next_page_token` field for `databricks.sdk.service.billing.WrappedLogDeliveryConfigurations`. +* Added `next_page_token` field for `databricks.sdk.service.catalog.EffectivePermissionsList`. +* Added `max_results` and `page_token` fields for `databricks.sdk.service.catalog.GetEffectiveRequest`. +* Added `max_results` and `page_token` fields for `databricks.sdk.service.catalog.GetGrantRequest`. +* Added `next_page_token` field for `databricks.sdk.service.catalog.ListMetastoresResponse`. +* Added `clean_room_name` field for `databricks.sdk.service.cleanrooms.CleanRoomAsset`. +* [Breaking] Added `name` field for `databricks.sdk.service.cleanrooms.DeleteCleanRoomAssetRequest`. +* [Breaking] Added `name` field for `databricks.sdk.service.cleanrooms.GetCleanRoomAssetRequest`. +* Added `trigger_state` field for `databricks.sdk.service.jobs.BaseJob`. +* Added `trigger_state` field for `databricks.sdk.service.jobs.Job`. +* Added `dbt_cloud_output` field for `databricks.sdk.service.jobs.RunOutput`. +* Added `dbt_cloud_task` field for `databricks.sdk.service.jobs.RunTask`. +* Added `dbt_cloud_task` field for `databricks.sdk.service.jobs.SubmitTask`. +* Added `dbt_cloud_task` field for `databricks.sdk.service.jobs.Task`. +* Added `tags` field for `databricks.sdk.service.pipelines.CreatePipeline`. +* Added `tags` field for `databricks.sdk.service.pipelines.EditPipeline`. +* Added `tags` field for `databricks.sdk.service.pipelines.PipelineSpec`. +* Added `max_provisioned_concurrency` and `min_provisioned_concurrency` fields for `databricks.sdk.service.serving.ServedEntityInput`. +* Added `max_provisioned_concurrency` and `min_provisioned_concurrency` fields for `databricks.sdk.service.serving.ServedEntityOutput`. +* Added `max_provisioned_concurrency` and `min_provisioned_concurrency` fields for `databricks.sdk.service.serving.ServedModelInput`. +* Added `max_provisioned_concurrency` and `min_provisioned_concurrency` fields for `databricks.sdk.service.serving.ServedModelOutput`. +* Added `endpoint_service` and `resource_names` fields for `databricks.sdk.service.settings.CreatePrivateEndpointRule`. +* Added `aws_private_endpoint_rules` field for `databricks.sdk.service.settings.NccEgressTargetRules`. +* Added `task_time_over_time_range` field for `databricks.sdk.service.sql.QueryMetrics`. +* Added `deltasharing_catalog`, `foreign_catalog`, `internal_catalog`, `managed_catalog`, `managed_online_catalog`, `system_catalog` and `unknown_catalog_type` enum values for `databricks.sdk.service.catalog.CatalogType`. +* Added `ga4_raw_data`, `power_bi`, `salesforce`, `salesforce_data_cloud`, `servicenow`, `unknown_connection_type` and `workday_raas` enum values for `databricks.sdk.service.catalog.ConnectionType`. +* Added `oauth_access_token`, `oauth_m2m`, `oauth_refresh_token`, `oauth_resource_owner_password`, `oauth_u2m`, `oauth_u2m_mapping`, `oidc_token`, `pem_private_key`, `service_credential` and `unknown_credential_type` enum values for `databricks.sdk.service.catalog.CredentialType`. 
+* Added `internal` and `internal_and_external` enum values for `databricks.sdk.service.catalog.DeltaSharingScopeEnum`.
+* Added `catalog`, `clean_room`, `connection`, `credential`, `external_location`, `external_metadata`, `function`, `metastore`, `pipeline`, `provider`, `recipient`, `schema`, `share`, `staging_table`, `storage_credential`, `table`, `unknown_securable_type` and `volume` enum values for `databricks.sdk.service.catalog.SecurableType`.
+* Added `cluster_migrated` enum value for `databricks.sdk.service.compute.EventType`.
+* Added `driver_unhealthy` enum value for `databricks.sdk.service.compute.TerminationReasonCode`.
+* Added `teradata` enum value for `databricks.sdk.service.pipelines.IngestionSourceType`.
+* Added `oidc_federation` enum value for `databricks.sdk.service.sharing.AuthenticationType`.
+* [Breaking] Changed `create()` method for [a.log_delivery](https://databricks-sdk-py.readthedocs.io/en/latest/account/billing/log_delivery.html) account-level service with new required argument order.
+* [Breaking] Changed `get()` method for [a.log_delivery](https://databricks-sdk-py.readthedocs.io/en/latest/account/billing/log_delivery.html) account-level service to return `databricks.sdk.service.billing.GetLogDeliveryConfigurationResponse` dataclass.
+* [Breaking] Changed `create_private_endpoint_rule()`, `delete_private_endpoint_rule()` and `get_private_endpoint_rule()` methods for [a.network_connectivity](https://databricks-sdk-py.readthedocs.io/en/latest/account/settings/network_connectivity.html) account-level service to return `databricks.sdk.service.settings.NccPrivateEndpointRule` dataclass.
+* [Breaking] Changed `list_private_endpoint_rules()` method for [a.network_connectivity](https://databricks-sdk-py.readthedocs.io/en/latest/account/settings/network_connectivity.html) account-level service to return `databricks.sdk.service.settings.ListPrivateEndpointRulesResponse` dataclass.
+* [Breaking] Changed `delete()` and `get()` methods for [w.clean_room_assets](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/cleanrooms/clean_room_assets.html) workspace-level service. Method path has changed.
+* [Breaking] Changed `delete()` and `get()` methods for [w.clean_room_assets](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/cleanrooms/clean_room_assets.html) workspace-level service with new required argument order.
+* [Breaking] Changed `get()` method for [w.grants](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/catalog/grants.html) workspace-level service to return `databricks.sdk.service.catalog.GetPermissionsResponse` dataclass.
+* [Breaking] Changed `update()` method for [w.grants](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/catalog/grants.html) workspace-level service to return `databricks.sdk.service.catalog.UpdatePermissionsResponse` dataclass.
+* [Breaking] Changed `list()` method for [w.metastores](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/catalog/metastores.html) workspace-level service to require request of `databricks.sdk.service.catalog.ListMetastoresRequest` dataclass.
+* Changed `account_id`, `credentials_id`, `log_type`, `output_format` and `storage_configuration_id` fields for `databricks.sdk.service.billing.LogDeliveryConfiguration` to be required.
+* Changed `message` and `status` fields for `databricks.sdk.service.billing.LogDeliveryStatus` to be required.
+* [Breaking] Changed `log_delivery_configuration` field for `databricks.sdk.service.billing.WrappedCreateLogDeliveryConfiguration` to be required.
+* [Breaking] Changed `securable_type` field for `databricks.sdk.service.catalog.ConnectionInfo` to type `databricks.sdk.service.catalog.SecurableType` dataclass.
+* [Breaking] Changed `securable_type` field for `databricks.sdk.service.catalog.GetEffectiveRequest` to type `str`.
+* [Breaking] Changed `securable_type` field for `databricks.sdk.service.catalog.GetGrantRequest` to type `str`.
+* [Breaking] Changed `delta_sharing_scope` field for `databricks.sdk.service.catalog.GetMetastoreSummaryResponse` to type `databricks.sdk.service.catalog.DeltaSharingScopeEnum` dataclass.
+* [Breaking] Changed `delta_sharing_scope` field for `databricks.sdk.service.catalog.MetastoreInfo` to type `databricks.sdk.service.catalog.DeltaSharingScopeEnum` dataclass.
+* [Breaking] Changed `catalog_type` field for `databricks.sdk.service.catalog.SchemaInfo` to type `databricks.sdk.service.catalog.CatalogType` dataclass.
+* [Breaking] Changed `delta_sharing_scope` field for `databricks.sdk.service.catalog.UpdateMetastore` to type `databricks.sdk.service.catalog.DeltaSharingScopeEnum` dataclass.
+* [Breaking] Changed `securable_type` field for `databricks.sdk.service.catalog.UpdatePermissions` to type `str`.
+* Changed `resource_id` field for `databricks.sdk.service.settings.CreatePrivateEndpointRule` to no longer be required.
+* [Breaking] Changed pagination for [NetworkConnectivityAPI.list_private_endpoint_rules](https://databricks-sdk-py.readthedocs.io/en/latest/account/settings/network_connectivity.html#databricks.sdk.service.settings.NetworkConnectivityAPI.list_private_endpoint_rules) method.
+* [Breaking] Removed [w.database_instances](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/catalog/database_instances.html) workspace-level service.
+* [Breaking] Removed [w.query_execution](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/dashboards/query_execution.html) workspace-level service.
+* [Breaking] Removed `update_ncc_azure_private_endpoint_rule_public()` method for [a.network_connectivity](https://databricks-sdk-py.readthedocs.io/en/latest/account/settings/network_connectivity.html) account-level service.
+* [Breaking] Removed `get_credentials_for_trace_data_download()`, `get_credentials_for_trace_data_upload()` and `list_logged_model_artifacts()` methods for [w.experiments](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/ml/experiments.html) workspace-level service.
+* [Breaking] Removed `get_published_dashboard_embedded()` method for [w.lakeview_embedded](https://databricks-sdk-py.readthedocs.io/en/latest/workspace/dashboards/lakeview_embedded.html) workspace-level service.
+* [Breaking] Removed `asset_full_name` field for `databricks.sdk.service.cleanrooms.DeleteCleanRoomAssetRequest`.
+* [Breaking] Removed `asset_full_name` field for `databricks.sdk.service.cleanrooms.GetCleanRoomAssetRequest`.
+* [Breaking] Removed `internal` and `internal_and_external` enum values for `databricks.sdk.service.catalog.GetMetastoreSummaryResponseDeltaSharingScope`.
+* [Breaking] Removed `internal` and `internal_and_external` enum values for `databricks.sdk.service.catalog.MetastoreInfoDeltaSharingScope`.
+* [Breaking] Removed `catalog`, `clean_room`, `connection`, `credential`, `external_location`, `external_metadata`, `function`, `metastore`, `pipeline`, `provider`, `recipient`, `schema`, `share`, `staging_table`, `storage_credential`, `table`, `unknown_securable_type` and `volume` enum values for `databricks.sdk.service.catalog.SecurableType`. +* [Breaking] Removed `internal` and `internal_and_external` enum values for `databricks.sdk.service.catalog.UpdateMetastoreDeltaSharingScope`. diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index f75645d25..612d1dd1b 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -13,12 +13,14 @@ from databricks.sdk.mixins.jobs import JobsExt from databricks.sdk.mixins.open_ai_client import ServingEndpointsExt from databricks.sdk.mixins.workspace import WorkspaceExt +from databricks.sdk.service import aibuilder as pkg_aibuilder from databricks.sdk.service import apps as pkg_apps from databricks.sdk.service import billing as pkg_billing from databricks.sdk.service import catalog as pkg_catalog from databricks.sdk.service import cleanrooms as pkg_cleanrooms from databricks.sdk.service import compute as pkg_compute from databricks.sdk.service import dashboards as pkg_dashboards +from databricks.sdk.service import database as pkg_database from databricks.sdk.service import files as pkg_files from databricks.sdk.service import iam as pkg_iam from databricks.sdk.service import jobs as pkg_jobs @@ -27,12 +29,14 @@ from databricks.sdk.service import oauth2 as pkg_oauth2 from databricks.sdk.service import pipelines as pkg_pipelines from databricks.sdk.service import provisioning as pkg_provisioning +from databricks.sdk.service import qualitymonitorv2 as pkg_qualitymonitorv2 from databricks.sdk.service import serving as pkg_serving from databricks.sdk.service import settings as pkg_settings from databricks.sdk.service import sharing as pkg_sharing from databricks.sdk.service import sql as pkg_sql from databricks.sdk.service import vectorsearch as pkg_vectorsearch from databricks.sdk.service import workspace as pkg_workspace +from databricks.sdk.service.aibuilder import CustomLlmsAPI from databricks.sdk.service.apps import AppsAPI from databricks.sdk.service.billing import (BillableUsageAPI, BudgetPolicyAPI, BudgetsAPI, LogDeliveryAPI, @@ -42,7 +46,6 @@ AccountStorageCredentialsAPI, ArtifactAllowlistsAPI, CatalogsAPI, ConnectionsAPI, CredentialsAPI, - DatabaseInstancesAPI, ExternalLocationsAPI, FunctionsAPI, GrantsAPI, MetastoresAPI, ModelVersionsAPI, OnlineTablesAPI, @@ -65,8 +68,8 @@ PolicyComplianceForClustersAPI, PolicyFamiliesAPI) from databricks.sdk.service.dashboards import (GenieAPI, LakeviewAPI, - LakeviewEmbeddedAPI, - QueryExecutionAPI) + LakeviewEmbeddedAPI) +from databricks.sdk.service.database import DatabaseAPI from databricks.sdk.service.files import DbfsAPI, FilesAPI from databricks.sdk.service.iam import (AccessControlAPI, AccountAccessControlAPI, @@ -98,6 +101,7 @@ NetworksAPI, PrivateAccessAPI, StorageAPI, VpcEndpointsAPI, Workspace, WorkspacesAPI) +from databricks.sdk.service.qualitymonitorv2 import QualityMonitorV2API from databricks.sdk.service.serving import (ServingEndpointsAPI, ServingEndpointsDataPlaneAPI) from databricks.sdk.service.settings import ( @@ -105,16 +109,17 @@ AibiDashboardEmbeddingAccessPolicyAPI, AibiDashboardEmbeddingApprovedDomainsAPI, AutomaticClusterUpdateAPI, ComplianceSecurityProfileAPI, CredentialsManagerAPI, - CspEnablementAccountAPI, DefaultNamespaceAPI, 
DisableLegacyAccessAPI, - DisableLegacyDbfsAPI, DisableLegacyFeaturesAPI, EnableExportNotebookAPI, - EnableIpAccessListsAPI, EnableNotebookTableClipboardAPI, - EnableResultsDownloadingAPI, EnhancedSecurityMonitoringAPI, - EsmEnablementAccountAPI, IpAccessListsAPI, + CspEnablementAccountAPI, DashboardEmailSubscriptionsAPI, + DefaultNamespaceAPI, DisableLegacyAccessAPI, DisableLegacyDbfsAPI, + DisableLegacyFeaturesAPI, EnableExportNotebookAPI, EnableIpAccessListsAPI, + EnableNotebookTableClipboardAPI, EnableResultsDownloadingAPI, + EnhancedSecurityMonitoringAPI, EsmEnablementAccountAPI, IpAccessListsAPI, LlmProxyPartnerPoweredAccountAPI, LlmProxyPartnerPoweredEnforceAPI, LlmProxyPartnerPoweredWorkspaceAPI, NetworkConnectivityAPI, NetworkPoliciesAPI, NotificationDestinationsAPI, PersonalComputeAPI, - RestrictWorkspaceAdminsAPI, SettingsAPI, TokenManagementAPI, TokensAPI, - WorkspaceConfAPI, WorkspaceNetworkConfigurationAPI) + RestrictWorkspaceAdminsAPI, SettingsAPI, SqlResultsDownloadAPI, + TokenManagementAPI, TokensAPI, WorkspaceConfAPI, + WorkspaceNetworkConfigurationAPI) from databricks.sdk.service.sharing import (ProvidersAPI, RecipientActivationAPI, RecipientFederationPoliciesAPI, @@ -251,10 +256,11 @@ def __init__( self._credentials = pkg_catalog.CredentialsAPI(self._api_client) self._credentials_manager = pkg_settings.CredentialsManagerAPI(self._api_client) self._current_user = pkg_iam.CurrentUserAPI(self._api_client) + self._custom_llms = pkg_aibuilder.CustomLlmsAPI(self._api_client) self._dashboard_widgets = pkg_sql.DashboardWidgetsAPI(self._api_client) self._dashboards = pkg_sql.DashboardsAPI(self._api_client) self._data_sources = pkg_sql.DataSourcesAPI(self._api_client) - self._database_instances = pkg_catalog.DatabaseInstancesAPI(self._api_client) + self._database = pkg_database.DatabaseAPI(self._api_client) self._dbfs = DbfsExt(self._api_client) self._dbsql_permissions = pkg_sql.DbsqlPermissionsAPI(self._api_client) self._experiments = pkg_ml.ExperimentsAPI(self._api_client) @@ -294,10 +300,10 @@ def __init__( ) self._provider_providers = pkg_marketplace.ProviderProvidersAPI(self._api_client) self._providers = pkg_sharing.ProvidersAPI(self._api_client) + self._quality_monitor_v2 = pkg_qualitymonitorv2.QualityMonitorV2API(self._api_client) self._quality_monitors = pkg_catalog.QualityMonitorsAPI(self._api_client) self._queries = pkg_sql.QueriesAPI(self._api_client) self._queries_legacy = pkg_sql.QueriesLegacyAPI(self._api_client) - self._query_execution = pkg_dashboards.QueryExecutionAPI(self._api_client) self._query_history = pkg_sql.QueryHistoryAPI(self._api_client) self._query_visualizations = pkg_sql.QueryVisualizationsAPI(self._api_client) self._query_visualizations_legacy = pkg_sql.QueryVisualizationsLegacyAPI(self._api_client) @@ -372,7 +378,7 @@ def alerts_legacy(self) -> pkg_sql.AlertsLegacyAPI: @property def alerts_v2(self) -> pkg_sql.AlertsV2API: - """TODO: Add description.""" + """New version of SQL Alerts.""" return self._alerts_v2 @property @@ -465,6 +471,11 @@ def current_user(self) -> pkg_iam.CurrentUserAPI: """This API allows retrieving information about currently authenticated user or service principal.""" return self._current_user + @property + def custom_llms(self) -> pkg_aibuilder.CustomLlmsAPI: + """The Custom LLMs service manages state and powers the UI for the Custom LLM product.""" + return self._custom_llms + @property def dashboard_widgets(self) -> pkg_sql.DashboardWidgetsAPI: """This is an evolving API that facilitates the addition and removal of widgets 
from existing dashboards within the Databricks Workspace.""" @@ -481,9 +492,9 @@ def data_sources(self) -> pkg_sql.DataSourcesAPI: return self._data_sources @property - def database_instances(self) -> pkg_catalog.DatabaseInstancesAPI: + def database(self) -> pkg_database.DatabaseAPI: """Database Instances provide access to a database via REST API or direct SQL.""" - return self._database_instances + return self._database @property def dbfs(self) -> DbfsExt: @@ -670,6 +681,11 @@ def providers(self) -> pkg_sharing.ProvidersAPI: """A data provider is an object representing the organization in the real world who shares the data.""" return self._providers + @property + def quality_monitor_v2(self) -> pkg_qualitymonitorv2.QualityMonitorV2API: + """Manage data quality of UC objects (currently support `schema`).""" + return self._quality_monitor_v2 + @property def quality_monitors(self) -> pkg_catalog.QualityMonitorsAPI: """A monitor computes and monitors data or model quality metrics for a table over time.""" @@ -685,11 +701,6 @@ def queries_legacy(self) -> pkg_sql.QueriesLegacyAPI: """These endpoints are used for CRUD operations on query definitions.""" return self._queries_legacy - @property - def query_execution(self) -> pkg_dashboards.QueryExecutionAPI: - """Query execution APIs for AI / BI Dashboards.""" - return self._query_execution - @property def query_history(self) -> pkg_sql.QueryHistoryAPI: """A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.""" @@ -1021,7 +1032,7 @@ def ip_access_lists(self) -> pkg_settings.AccountIpAccessListsAPI: @property def log_delivery(self) -> pkg_billing.LogDeliveryAPI: - """These APIs manage log delivery configurations for this account.""" + """These APIs manage Log delivery configurations for this account.""" return self._log_delivery @property @@ -1116,7 +1127,7 @@ def workspace_assignment(self) -> pkg_iam.WorkspaceAssignmentAPI: @property def workspace_network_configuration(self) -> pkg_settings.WorkspaceNetworkConfigurationAPI: - """These APIs allow configuration of network settings for Databricks workspaces.""" + """These APIs allow configuration of network settings for Databricks workspaces by selecting which network policy to associate with the workspace.""" return self._workspace_network_configuration @property diff --git a/databricks/sdk/service/aibuilder.py b/databricks/sdk/service/aibuilder.py new file mode 100755 index 000000000..3b37a2070 --- /dev/null +++ b/databricks/sdk/service/aibuilder.py @@ -0,0 +1,364 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, List, Optional + +from ._internal import _enum, _from_dict, _repeated_dict + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class CancelCustomLlmOptimizationRunRequest: + id: Optional[str] = None + + +@dataclass +class CancelResponse: + def as_dict(self) -> dict: + """Serializes the CancelResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CancelResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CancelResponse: + """Deserializes the CancelResponse from a dictionary.""" + return cls() + + +@dataclass +class CustomLlm: + name: str + """Name of the custom LLM""" + + instructions: str + """Instructions for the custom LLM to follow""" + + optimization_state: State + """If optimization is kicked off, tracks the state of the custom LLM""" + + agent_artifact_path: Optional[str] = None + + creation_time: Optional[str] = None + """Creation timestamp of the custom LLM""" + + creator: Optional[str] = None + """Creator of the custom LLM""" + + datasets: Optional[List[Dataset]] = None + """Datasets used for training and evaluating the model, not for inference""" + + endpoint_name: Optional[str] = None + """Name of the endpoint that will be used to serve the custom LLM""" + + guidelines: Optional[List[str]] = None + """Guidelines for the custom LLM to adhere to""" + + id: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the CustomLlm into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.agent_artifact_path is not None: + body["agent_artifact_path"] = self.agent_artifact_path + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.creator is not None: + body["creator"] = self.creator + if self.datasets: + body["datasets"] = [v.as_dict() for v in self.datasets] + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + if self.guidelines: + body["guidelines"] = [v for v in self.guidelines] + if self.id is not None: + body["id"] = self.id + if self.instructions is not None: + body["instructions"] = self.instructions + if self.name is not None: + body["name"] = self.name + if self.optimization_state is not None: + body["optimization_state"] = self.optimization_state.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CustomLlm into a shallow dictionary of its immediate attributes.""" + body = {} + if self.agent_artifact_path is not None: + body["agent_artifact_path"] = self.agent_artifact_path + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.creator is not None: + body["creator"] = self.creator + if self.datasets: + body["datasets"] = self.datasets + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + if self.guidelines: + body["guidelines"] = self.guidelines + if self.id is not None: + body["id"] = self.id + if self.instructions is not None: + body["instructions"] = self.instructions + if self.name is not None: + body["name"] = self.name + if self.optimization_state is not None: + body["optimization_state"] = self.optimization_state + return body + + @classmethod + def 
from_dict(cls, d: Dict[str, Any]) -> CustomLlm: + """Deserializes the CustomLlm from a dictionary.""" + return cls( + agent_artifact_path=d.get("agent_artifact_path", None), + creation_time=d.get("creation_time", None), + creator=d.get("creator", None), + datasets=_repeated_dict(d, "datasets", Dataset), + endpoint_name=d.get("endpoint_name", None), + guidelines=d.get("guidelines", None), + id=d.get("id", None), + instructions=d.get("instructions", None), + name=d.get("name", None), + optimization_state=_enum(d, "optimization_state", State), + ) + + +@dataclass +class Dataset: + table: Table + + def as_dict(self) -> dict: + """Serializes the Dataset into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.table: + body["table"] = self.table.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Dataset into a shallow dictionary of its immediate attributes.""" + body = {} + if self.table: + body["table"] = self.table + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Dataset: + """Deserializes the Dataset from a dictionary.""" + return cls(table=_from_dict(d, "table", Table)) + + +@dataclass +class StartCustomLlmOptimizationRunRequest: + id: Optional[str] = None + """The Id of the tile.""" + + +class State(Enum): + """States of Custom LLM optimization lifecycle.""" + + CANCELLED = "CANCELLED" + COMPLETED = "COMPLETED" + CREATED = "CREATED" + FAILED = "FAILED" + PENDING = "PENDING" + RUNNING = "RUNNING" + + +@dataclass +class Table: + table_path: str + """Full UC table path in catalog.schema.table_name format""" + + request_col: str + """Name of the request column""" + + response_col: Optional[str] = None + """Optional: Name of the response column if the data is labeled""" + + def as_dict(self) -> dict: + """Serializes the Table into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.request_col is not None: + body["request_col"] = self.request_col + if self.response_col is not None: + body["response_col"] = self.response_col + if self.table_path is not None: + body["table_path"] = self.table_path + return body + + def as_shallow_dict(self) -> dict: + """Serializes the Table into a shallow dictionary of its immediate attributes.""" + body = {} + if self.request_col is not None: + body["request_col"] = self.request_col + if self.response_col is not None: + body["response_col"] = self.response_col + if self.table_path is not None: + body["table_path"] = self.table_path + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> Table: + """Deserializes the Table from a dictionary.""" + return cls( + request_col=d.get("request_col", None), + response_col=d.get("response_col", None), + table_path=d.get("table_path", None), + ) + + +@dataclass +class UpdateCustomLlmRequest: + custom_llm: CustomLlm + """The CustomLlm containing the fields which should be updated.""" + + update_mask: str + """The list of the CustomLlm fields to update. These should correspond to the values (or lack + thereof) present in `custom_llm`. + + The field mask must be a single string, with multiple fields separated by commas (no spaces). + The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, + as only the entire collection field can be specified. Field names must exactly match the + resource field names. + + A field mask of `*` indicates full replacement. 
It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the + API changes in the future.""" + + id: Optional[str] = None + """The id of the custom llm""" + + def as_dict(self) -> dict: + """Serializes the UpdateCustomLlmRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.custom_llm: + body["custom_llm"] = self.custom_llm.as_dict() + if self.id is not None: + body["id"] = self.id + if self.update_mask is not None: + body["update_mask"] = self.update_mask + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateCustomLlmRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.custom_llm: + body["custom_llm"] = self.custom_llm + if self.id is not None: + body["id"] = self.id + if self.update_mask is not None: + body["update_mask"] = self.update_mask + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateCustomLlmRequest: + """Deserializes the UpdateCustomLlmRequest from a dictionary.""" + return cls( + custom_llm=_from_dict(d, "custom_llm", CustomLlm), + id=d.get("id", None), + update_mask=d.get("update_mask", None), + ) + + +class CustomLlmsAPI: + """The Custom LLMs service manages state and powers the UI for the Custom LLM product.""" + + def __init__(self, api_client): + self._api = api_client + + def cancel(self, id: str): + """Cancel a Custom LLM Optimization Run. + + :param id: str + + + """ + + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + self._api.do("POST", f"/api/2.0/custom-llms/{id}/optimize/cancel", headers=headers) + + def create(self, id: str) -> CustomLlm: + """Start a Custom LLM Optimization Run. + + :param id: str + The Id of the tile. + + :returns: :class:`CustomLlm` + """ + + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", f"/api/2.0/custom-llms/{id}/optimize", headers=headers) + return CustomLlm.from_dict(res) + + def get(self, id: str) -> CustomLlm: + """Get a Custom LLM. + + :param id: str + The id of the custom llm + + :returns: :class:`CustomLlm` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/custom-llms/{id}", headers=headers) + return CustomLlm.from_dict(res) + + def update(self, id: str, custom_llm: CustomLlm, update_mask: str) -> CustomLlm: + """Update a Custom LLM. + + :param id: str + The id of the custom llm + :param custom_llm: :class:`CustomLlm` + The CustomLlm containing the fields which should be updated. + :param update_mask: str + The list of the CustomLlm fields to update. These should correspond to the values (or lack thereof) + present in `custom_llm`. + + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. 
+ + :returns: :class:`CustomLlm` + """ + body = {} + if custom_llm is not None: + body["custom_llm"] = custom_llm.as_dict() + if update_mask is not None: + body["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/custom-llms/{id}", body=body, headers=headers) + return CustomLlm.from_dict(res) diff --git a/databricks/sdk/service/billing.py b/databricks/sdk/service/billing.py index 3595e4026..4dc535891 100755 --- a/databricks/sdk/service/billing.py +++ b/databricks/sdk/service/billing.py @@ -717,14 +717,12 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateBudgetPolicyRequest: @dataclass class CreateLogDeliveryConfigurationParams: + """* Log Delivery Configuration""" + log_type: LogType - """Log delivery type. Supported values are: - - * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the CSV schema, see the - [View billable usage]. - - * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, see [Configure audit - logging] + """Log delivery type. Supported values are: * `BILLABLE_USAGE` — Configure [billable usage log + delivery]. For the CSV schema, see the [View billable usage]. * `AUDIT_LOGS` — Configure + [audit log delivery]. For the JSON schema, see [Configure audit logging] [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html @@ -732,12 +730,11 @@ class CreateLogDeliveryConfigurationParams: [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html""" output_format: OutputFormat - """The file type of log delivery. - - * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the CSV (comma-separated - values) format is supported. For the schema, see the [View billable usage] * If `log_type` is - `AUDIT_LOGS`, this value must be `JSON`. Only the JSON (JavaScript Object Notation) format is - supported. For the schema, see the [Configuring audit logs]. + """The file type of log delivery. * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. + Only the CSV (comma-separated values) format is supported. For the schema, see the [View + billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be `JSON`. Only the JSON + (JavaScript Object Notation) format is supported. For the schema, see the [Configuring audit + logs]. [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html""" @@ -764,9 +761,9 @@ class CreateLogDeliveryConfigurationParams: start or end with a slash character.""" delivery_start_time: Optional[str] = None - """This field applies only if `log_type` is `BILLABLE_USAGE`. This is the optional start month and - year for delivery, specified in `YYYY-MM` format. Defaults to current year and month. - `BILLABLE_USAGE` logs are not available for usage before March 2019 (`2019-03`).""" + """This field applies only if log_type is BILLABLE_USAGE. This is the optional start month and year + for delivery, specified in YYYY-MM format. Defaults to current year and month. BILLABLE_USAGE + logs are not available for usage before March 2019 (2019-03).""" status: Optional[LogDeliveryConfigStatus] = None """Status of log delivery configuration. 
Set to `ENABLED` (enabled) or `DISABLED` (disabled). @@ -885,12 +882,12 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse: class DeliveryStatus(Enum): - """The status string for log delivery. Possible values are: * `CREATED`: There were no log delivery - attempts since the config was created. * `SUCCEEDED`: The latest attempt of log delivery has - succeeded completely. * `USER_FAILURE`: The latest attempt of log delivery failed because of - misconfiguration of customer provided permissions on role or storage. * `SYSTEM_FAILURE`: The + """* The status string for log delivery. Possible values are: `CREATED`: There were no log delivery + attempts since the config was created. `SUCCEEDED`: The latest attempt of log delivery has + succeeded completely. `USER_FAILURE`: The latest attempt of log delivery failed because of + misconfiguration of customer provided permissions on role or storage. `SYSTEM_FAILURE`: The latest attempt of log delivery failed because of an Databricks internal error. Contact support - if it doesn't go away soon. * `NOT_FOUND`: The log delivery status as the configuration has been + if it doesn't go away soon. `NOT_FOUND`: The log delivery status as the configuration has been disabled since the release of this feature or there are no workspaces in the account.""" CREATED = "CREATED" @@ -1026,6 +1023,31 @@ def from_dict(cls, d: Dict[str, Any]) -> GetBudgetConfigurationResponse: return cls(budget=_from_dict(d, "budget", BudgetConfiguration)) +@dataclass +class GetLogDeliveryConfigurationResponse: + log_delivery_configuration: Optional[LogDeliveryConfiguration] = None + """The fetched log delivery configuration""" + + def as_dict(self) -> dict: + """Serializes the GetLogDeliveryConfigurationResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.log_delivery_configuration: + body["log_delivery_configuration"] = self.log_delivery_configuration.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GetLogDeliveryConfigurationResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.log_delivery_configuration: + body["log_delivery_configuration"] = self.log_delivery_configuration + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GetLogDeliveryConfigurationResponse: + """Deserializes the GetLogDeliveryConfigurationResponse from a dictionary.""" + return cls(log_delivery_configuration=_from_dict(d, "log_delivery_configuration", LogDeliveryConfiguration)) + + @dataclass class LimitConfig: """The limit configuration of the policy. Limit configuration provide a budget policy level cost @@ -1128,10 +1150,10 @@ def from_dict(cls, d: Dict[str, Any]) -> ListBudgetPoliciesResponse: class LogDeliveryConfigStatus(Enum): - """Status of log delivery configuration. Set to `ENABLED` (enabled) or `DISABLED` (disabled). - Defaults to `ENABLED`. You can [enable or disable the - configuration](#operation/patch-log-delivery-config-status) later. 
Deletion of a configuration - is not supported, so disable a log delivery configuration that is no longer needed.""" + """* Log Delivery Status + + `ENABLED`: All dependencies have executed and succeeded `DISABLED`: At least one dependency has + succeeded""" DISABLED = "DISABLED" ENABLED = "ENABLED" @@ -1139,62 +1161,65 @@ class LogDeliveryConfigStatus(Enum): @dataclass class LogDeliveryConfiguration: - account_id: Optional[str] = None - """The Databricks account ID that hosts the log delivery configuration.""" + """* Log Delivery Configuration""" - config_id: Optional[str] = None - """Databricks log delivery configuration ID.""" + log_type: LogType + """Log delivery type. Supported values are: * `BILLABLE_USAGE` — Configure [billable usage log + delivery]. For the CSV schema, see the [View billable usage]. * `AUDIT_LOGS` — Configure + [audit log delivery]. For the JSON schema, see [Configure audit logging] + + [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html""" - config_name: Optional[str] = None - """The optional human-readable name of the log delivery configuration. Defaults to empty.""" + output_format: OutputFormat + """The file type of log delivery. * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. + Only the CSV (comma-separated values) format is supported. For the schema, see the [View + billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be `JSON`. Only the JSON + (JavaScript Object Notation) format is supported. For the schema, see the [Configuring audit + logs]. + + [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html + [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html""" - creation_time: Optional[int] = None - """Time in epoch milliseconds when the log delivery configuration was created.""" + account_id: str + """Databricks account ID.""" - credentials_id: Optional[str] = None + credentials_id: str """The ID for a method:credentials/create that represents the AWS IAM role with policy and trust relationship as described in the main billable usage documentation page. See [Configure billable usage delivery]. [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html""" + storage_configuration_id: str + """The ID for a method:storage/create that represents the S3 bucket with bucket policy as described + in the main billable usage documentation page. See [Configure billable usage delivery]. + + [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html""" + + config_id: Optional[str] = None + """The unique UUID of log delivery configuration""" + + config_name: Optional[str] = None + """The optional human-readable name of the log delivery configuration. Defaults to empty.""" + + creation_time: Optional[int] = None + """Time in epoch milliseconds when the log delivery configuration was created.""" + delivery_path_prefix: Optional[str] = None """The optional delivery path prefix within Amazon S3 storage. 
Defaults to empty, which means that logs are delivered to the root of the bucket. This must be a valid S3 object key. This must not start or end with a slash character.""" delivery_start_time: Optional[str] = None - """This field applies only if `log_type` is `BILLABLE_USAGE`. This is the optional start month and - year for delivery, specified in `YYYY-MM` format. Defaults to current year and month. - `BILLABLE_USAGE` logs are not available for usage before March 2019 (`2019-03`).""" + """This field applies only if log_type is BILLABLE_USAGE. This is the optional start month and year + for delivery, specified in YYYY-MM format. Defaults to current year and month. BILLABLE_USAGE + logs are not available for usage before March 2019 (2019-03).""" log_delivery_status: Optional[LogDeliveryStatus] = None - """Databricks log delivery status.""" - - log_type: Optional[LogType] = None - """Log delivery type. Supported values are: - - * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the CSV schema, see the - [View billable usage]. - - * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, see [Configure audit - logging] - - [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html - [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html""" - - output_format: Optional[OutputFormat] = None - """The file type of log delivery. - - * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the CSV (comma-separated - values) format is supported. For the schema, see the [View billable usage] * If `log_type` is - `AUDIT_LOGS`, this value must be `JSON`. Only the JSON (JavaScript Object Notation) format is - supported. For the schema, see the [Configuring audit logs]. - - [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html""" + """The LogDeliveryStatus of this log delivery configuration""" status: Optional[LogDeliveryConfigStatus] = None """Status of log delivery configuration. Set to `ENABLED` (enabled) or `DISABLED` (disabled). @@ -1202,12 +1227,6 @@ class LogDeliveryConfiguration: configuration](#operation/patch-log-delivery-config-status) later. Deletion of a configuration is not supported, so disable a log delivery configuration that is no longer needed.""" - storage_configuration_id: Optional[str] = None - """The ID for a method:storage/create that represents the S3 bucket with bucket policy as described - in the main billable usage documentation page. See [Configure billable usage delivery]. 
- - [Configure billable usage delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html""" - update_time: Optional[int] = None """Time in epoch milliseconds when the log delivery configuration was updated.""" @@ -1312,20 +1331,8 @@ def from_dict(cls, d: Dict[str, Any]) -> LogDeliveryConfiguration: @dataclass class LogDeliveryStatus: - """Databricks log delivery status.""" - - last_attempt_time: Optional[str] = None - """The UTC time for the latest log delivery attempt.""" - - last_successful_attempt_time: Optional[str] = None - """The UTC time for the latest successful log delivery.""" - - message: Optional[str] = None - """Informative message about the latest log delivery attempt. If the log delivery fails with - USER_FAILURE, error details will be provided for fixing misconfigurations in cloud permissions.""" - - status: Optional[DeliveryStatus] = None - """The status string for log delivery. Possible values are: * `CREATED`: There were no log delivery + status: DeliveryStatus + """Enum that describes the status. Possible values are: * `CREATED`: There were no log delivery attempts since the config was created. * `SUCCEEDED`: The latest attempt of log delivery has succeeded completely. * `USER_FAILURE`: The latest attempt of log delivery failed because of misconfiguration of customer provided permissions on role or storage. * `SYSTEM_FAILURE`: The @@ -1333,6 +1340,16 @@ class LogDeliveryStatus: if it doesn't go away soon. * `NOT_FOUND`: The log delivery status as the configuration has been disabled since the release of this feature or there are no workspaces in the account.""" + message: str + """Informative message about the latest log delivery attempt. If the log delivery fails with + USER_FAILURE, error details will be provided for fixing misconfigurations in cloud permissions.""" + + last_attempt_time: Optional[str] = None + """The UTC time for the latest log delivery attempt.""" + + last_successful_attempt_time: Optional[str] = None + """The UTC time for the latest successful log delivery.""" + def as_dict(self) -> dict: """Serializes the LogDeliveryStatus into a dictionary suitable for use as a JSON request body.""" body = {} @@ -1371,34 +1388,14 @@ def from_dict(cls, d: Dict[str, Any]) -> LogDeliveryStatus: class LogType(Enum): - """Log delivery type. Supported values are: - - * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the CSV schema, see the - [View billable usage]. - - * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, see [Configure audit - logging] - - [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html - [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - """ + """* Log Delivery Type""" AUDIT_LOGS = "AUDIT_LOGS" BILLABLE_USAGE = "BILLABLE_USAGE" class OutputFormat(Enum): - """The file type of log delivery. - - * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the CSV (comma-separated - values) format is supported. For the schema, see the [View billable usage] * If `log_type` is - `AUDIT_LOGS`, this value must be `JSON`. Only the JSON (JavaScript Object Notation) format is - supported. For the schema, see the [Configuring audit logs]. 
- - [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html""" + """* Log Delivery Output Format""" CSV = "CSV" JSON = "JSON" @@ -1580,6 +1577,8 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateBudgetConfigurationResponse: @dataclass class UpdateLogDeliveryConfigurationStatusRequest: + """* Update Log Delivery Configuration""" + status: LogDeliveryConfigStatus """Status of log delivery configuration. Set to `ENABLED` (enabled) or `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable the @@ -1587,7 +1586,7 @@ class UpdateLogDeliveryConfigurationStatusRequest: is not supported, so disable a log delivery configuration that is no longer needed.""" log_delivery_configuration_id: Optional[str] = None - """Databricks log delivery configuration ID""" + """The log delivery configuration id of customer""" def as_dict(self) -> dict: """Serializes the UpdateLogDeliveryConfigurationStatusRequest into a dictionary suitable for use as a JSON request body.""" @@ -1624,7 +1623,10 @@ class UsageDashboardType(Enum): @dataclass class WrappedCreateLogDeliveryConfiguration: - log_delivery_configuration: Optional[CreateLogDeliveryConfigurationParams] = None + """* Properties of the new log delivery configuration.""" + + log_delivery_configuration: CreateLogDeliveryConfigurationParams + """* Log Delivery Configuration""" def as_dict(self) -> dict: """Serializes the WrappedCreateLogDeliveryConfiguration into a dictionary suitable for use as a JSON request body.""" @@ -1651,6 +1653,7 @@ def from_dict(cls, d: Dict[str, Any]) -> WrappedCreateLogDeliveryConfiguration: @dataclass class WrappedLogDeliveryConfiguration: log_delivery_configuration: Optional[LogDeliveryConfiguration] = None + """The created log delivery configuration""" def as_dict(self) -> dict: """Serializes the WrappedLogDeliveryConfiguration into a dictionary suitable for use as a JSON request body.""" @@ -1675,12 +1678,19 @@ def from_dict(cls, d: Dict[str, Any]) -> WrappedLogDeliveryConfiguration: @dataclass class WrappedLogDeliveryConfigurations: log_delivery_configurations: Optional[List[LogDeliveryConfiguration]] = None + """Log delivery configurations were returned successfully.""" + + next_page_token: Optional[str] = None + """Token which can be sent as `page_token` to retrieve the next page of results. 
If this field is + omitted, there are no subsequent budgets.""" def as_dict(self) -> dict: """Serializes the WrappedLogDeliveryConfigurations into a dictionary suitable for use as a JSON request body.""" body = {} if self.log_delivery_configurations: body["log_delivery_configurations"] = [v.as_dict() for v in self.log_delivery_configurations] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token return body def as_shallow_dict(self) -> dict: @@ -1688,13 +1698,16 @@ def as_shallow_dict(self) -> dict: body = {} if self.log_delivery_configurations: body["log_delivery_configurations"] = self.log_delivery_configurations + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> WrappedLogDeliveryConfigurations: """Deserializes the WrappedLogDeliveryConfigurations from a dictionary.""" return cls( - log_delivery_configurations=_repeated_dict(d, "log_delivery_configurations", LogDeliveryConfiguration) + log_delivery_configurations=_repeated_dict(d, "log_delivery_configurations", LogDeliveryConfiguration), + next_page_token=d.get("next_page_token", None), ) @@ -2031,58 +2044,14 @@ def update(self, budget_id: str, budget: UpdateBudgetConfigurationBudget) -> Upd class LogDeliveryAPI: - """These APIs manage log delivery configurations for this account. The two supported log types for this API - are _billable usage logs_ and _audit logs_. This feature is in Public Preview. This feature works with all - account ID types. - - Log delivery works with all account types. However, if your account is on the E2 version of the platform - or on a select custom plan that allows multiple workspaces per account, you can optionally configure - different storage destinations for each workspace. Log delivery status is also provided to know the latest - status of log delivery attempts. The high-level flow of billable usage delivery: - - 1. **Create storage**: In AWS, [create a new AWS S3 bucket] with a specific bucket policy. Using - Databricks APIs, call the Account API to create a [storage configuration object](:method:Storage/Create) - that uses the bucket name. 2. **Create credentials**: In AWS, create the appropriate AWS IAM role. For - full details, including the required IAM role policies and trust relationship, see [Billable usage log - delivery]. Using Databricks APIs, call the Account API to create a [credential configuration - object](:method:Credentials/Create) that uses the IAM role"s ARN. 3. **Create log delivery - configuration**: Using Databricks APIs, call the Account API to [create a log delivery - configuration](:method:LogDelivery/Create) that uses the credential and storage configuration objects from - previous steps. You can specify if the logs should include all events of that log type in your account - (_Account level_ delivery) or only events for a specific set of workspaces (_workspace level_ delivery). - Account level log delivery applies to all current and future workspaces plus account level logs, while - workspace level log delivery solely delivers logs related to the specified workspaces. You can create - multiple types of delivery configurations per account. - - For billable usage delivery: * For more information about billable usage logs, see [Billable usage log - delivery]. For the CSV schema, see the [Usage page]. 
* The delivery location is - `//billable-usage/csv/`, where `` is the name of the optional delivery path - prefix you set up during log delivery configuration. Files are named - `workspaceId=-usageMonth=.csv`. * All billable usage logs apply to specific - workspaces (_workspace level_ logs). You can aggregate usage for your entire account by creating an - _account level_ delivery configuration that delivers logs for all current and future workspaces in your - account. * The files are delivered daily by overwriting the month's CSV file for each workspace. - - For audit log delivery: * For more information about about audit log delivery, see [Audit log delivery], - which includes information about the used JSON schema. * The delivery location is - `//workspaceId=/date=/auditlogs_.json`. - Files may get overwritten with the same content multiple times to achieve exactly-once delivery. * If the - audit log delivery configuration included specific workspace IDs, only _workspace-level_ audit logs for - those workspaces are delivered. If the log delivery configuration applies to the entire account (_account - level_ delivery configuration), the audit log delivery includes workspace-level audit logs for all - workspaces in the account as well as account-level audit logs. See [Audit log delivery] for details. * - Auditable events are typically available in logs within 15 minutes. - - [Audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [Billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - [Usage page]: https://docs.databricks.com/administration-guide/account-settings/usage.html - [create a new AWS S3 bucket]: https://docs.databricks.com/administration-guide/account-api/aws-storage.html""" + """These APIs manage Log delivery configurations for this account. Log delivery configs enable you to + configure the delivery of the specified type of logs to your storage account.""" def __init__(self, api_client): self._api = api_client def create( - self, *, log_delivery_configuration: Optional[CreateLogDeliveryConfigurationParams] = None + self, log_delivery_configuration: CreateLogDeliveryConfigurationParams ) -> WrappedLogDeliveryConfiguration: """Create a new log delivery configuration. @@ -2107,7 +2076,8 @@ def create( [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [Deliver and access billable usage logs]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - :param log_delivery_configuration: :class:`CreateLogDeliveryConfigurationParams` (optional) + :param log_delivery_configuration: :class:`CreateLogDeliveryConfigurationParams` + * Log Delivery Configuration :returns: :class:`WrappedLogDeliveryConfiguration` """ @@ -2122,15 +2092,15 @@ def create( res = self._api.do("POST", f"/api/2.0/accounts/{self._api.account_id}/log-delivery", body=body, headers=headers) return WrappedLogDeliveryConfiguration.from_dict(res) - def get(self, log_delivery_configuration_id: str) -> WrappedLogDeliveryConfiguration: + def get(self, log_delivery_configuration_id: str) -> GetLogDeliveryConfigurationResponse: """Get log delivery configuration. Gets a Databricks log delivery configuration object for an account, both specified by ID. 
:param log_delivery_configuration_id: str - Databricks log delivery configuration ID + The log delivery configuration id of customer - :returns: :class:`WrappedLogDeliveryConfiguration` + :returns: :class:`GetLogDeliveryConfigurationResponse` """ headers = { @@ -2142,12 +2112,13 @@ def get(self, log_delivery_configuration_id: str) -> WrappedLogDeliveryConfigura f"/api/2.0/accounts/{self._api.account_id}/log-delivery/{log_delivery_configuration_id}", headers=headers, ) - return WrappedLogDeliveryConfiguration.from_dict(res) + return GetLogDeliveryConfigurationResponse.from_dict(res) def list( self, *, credentials_id: Optional[str] = None, + page_token: Optional[str] = None, status: Optional[LogDeliveryConfigStatus] = None, storage_configuration_id: Optional[str] = None, ) -> Iterator[LogDeliveryConfiguration]: @@ -2156,11 +2127,14 @@ def list( Gets all Databricks log delivery configurations associated with an account specified by ID. :param credentials_id: str (optional) - Filter by credential configuration ID. + The Credentials id to filter the search results with + :param page_token: str (optional) + A page token received from a previous get all budget configurations call. This token can be used to + retrieve the subsequent page. Requests first page if absent. :param status: :class:`LogDeliveryConfigStatus` (optional) - Filter by status `ENABLED` or `DISABLED`. + The log delivery status to filter the search results with :param storage_configuration_id: str (optional) - Filter by storage configuration ID. + The Storage Configuration id to filter the search results with :returns: Iterator over :class:`LogDeliveryConfiguration` """ @@ -2168,6 +2142,8 @@ def list( query = {} if credentials_id is not None: query["credentials_id"] = credentials_id + if page_token is not None: + query["page_token"] = page_token if status is not None: query["status"] = status.value if storage_configuration_id is not None: @@ -2176,11 +2152,16 @@ def list( "Accept": "application/json", } - json = self._api.do( - "GET", f"/api/2.0/accounts/{self._api.account_id}/log-delivery", query=query, headers=headers - ) - parsed = WrappedLogDeliveryConfigurations.from_dict(json).log_delivery_configurations - return parsed if parsed is not None else [] + while True: + json = self._api.do( + "GET", f"/api/2.0/accounts/{self._api.account_id}/log-delivery", query=query, headers=headers + ) + if "log_delivery_configurations" in json: + for v in json["log_delivery_configurations"]: + yield LogDeliveryConfiguration.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] def patch_status(self, log_delivery_configuration_id: str, status: LogDeliveryConfigStatus): """Enable or disable log delivery configuration. @@ -2191,7 +2172,7 @@ def patch_status(self, log_delivery_configuration_id: str, status: LogDeliveryCo under [Create log delivery](:method:LogDelivery/Create). :param log_delivery_configuration_id: str - Databricks log delivery configuration ID + The log delivery configuration id of customer :param status: :class:`LogDeliveryConfigStatus` Status of log delivery configuration. Set to `ENABLED` (enabled) or `DISABLED` (disabled). Defaults to `ENABLED`. 
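`list()` is now a generator that follows `next_page_token` automatically, so callers can iterate without handling pagination themselves; a minimal sketch (the printed attribute names are assumed from the existing `LogDeliveryConfiguration` class):

```python
from databricks.sdk import AccountClient
from databricks.sdk.service.billing import LogDeliveryConfigStatus

a = AccountClient()

# The iterator passes each next_page_token back as page_token on the next
# request, so no manual pagination is needed on the caller side.
for config in a.log_delivery.list(status=LogDeliveryConfigStatus.ENABLED):
    print(config.config_id, config.config_name)
```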
You can [enable or disable the diff --git a/databricks/sdk/service/catalog.py b/databricks/sdk/service/catalog.py index 9553d8877..f1819bf54 100755 --- a/databricks/sdk/service/catalog.py +++ b/databricks/sdk/service/catalog.py @@ -1414,7 +1414,7 @@ class ConnectionInfo: """Username of current owner of the connection.""" properties: Optional[Dict[str, str]] = None - """An object containing map of key-value properties attached to the connection.""" + """A map of key-value properties attached to the securable.""" provisioning_info: Optional[ProvisioningInfo] = None """Status of an asynchronously provisioned resource.""" @@ -1422,7 +1422,8 @@ class ConnectionInfo: read_only: Optional[bool] = None """If the connection is read only.""" - securable_type: Optional[str] = None + securable_type: Optional[SecurableType] = None + """The type of Unity Catalog securable.""" updated_at: Optional[int] = None """Time at which this connection was updated, in epoch milliseconds.""" @@ -1465,7 +1466,7 @@ def as_dict(self) -> dict: if self.read_only is not None: body["read_only"] = self.read_only if self.securable_type is not None: - body["securable_type"] = self.securable_type + body["securable_type"] = self.securable_type.value if self.updated_at is not None: body["updated_at"] = self.updated_at if self.updated_by is not None: @@ -1533,7 +1534,7 @@ def from_dict(cls, d: Dict[str, Any]) -> ConnectionInfo: properties=d.get("properties", None), provisioning_info=_from_dict(d, "provisioning_info", ProvisioningInfo), read_only=d.get("read_only", None), - securable_type=d.get("securable_type", None), + securable_type=_enum(d, "securable_type", SecurableType), updated_at=d.get("updated_at", None), updated_by=d.get("updated_by", None), url=d.get("url", None), @@ -1541,21 +1542,28 @@ def from_dict(cls, d: Dict[str, Any]) -> ConnectionInfo: class ConnectionType(Enum): - """The type of connection.""" + """Next Id: 31""" BIGQUERY = "BIGQUERY" DATABRICKS = "DATABRICKS" + GA4_RAW_DATA = "GA4_RAW_DATA" GLUE = "GLUE" HIVE_METASTORE = "HIVE_METASTORE" HTTP = "HTTP" MYSQL = "MYSQL" ORACLE = "ORACLE" POSTGRESQL = "POSTGRESQL" + POWER_BI = "POWER_BI" REDSHIFT = "REDSHIFT" + SALESFORCE = "SALESFORCE" + SALESFORCE_DATA_CLOUD = "SALESFORCE_DATA_CLOUD" + SERVICENOW = "SERVICENOW" SNOWFLAKE = "SNOWFLAKE" SQLDW = "SQLDW" SQLSERVER = "SQLSERVER" TERADATA = "TERADATA" + UNKNOWN_CONNECTION_TYPE = "UNKNOWN_CONNECTION_TYPE" + WORKDAY_RAAS = "WORKDAY_RAAS" @dataclass @@ -1706,7 +1714,7 @@ class CreateConnection: """User-provided free-form text description.""" properties: Optional[Dict[str, str]] = None - """An object containing map of key-value properties attached to the connection.""" + """A map of key-value properties attached to the securable.""" read_only: Optional[bool] = None """If the connection is read only.""" @@ -2202,9 +2210,7 @@ class CreateMetastore: """The user-specified name of the metastore.""" region: Optional[str] = None - """Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). The field can be omitted - in the __workspace-level__ __API__ but not in the __account-level__ __API__. If this field is - omitted, the region of the workspace receiving the request will be used.""" + """Cloud region which the metastore serves (e.g., `us-west-2`, `westus`).""" storage_root: Optional[str] = None """The storage root URL for metastore""" @@ -2243,7 +2249,7 @@ class CreateMetastoreAssignment: """The unique ID of the metastore.""" default_catalog_name: str - """The name of the default catalog in the metastore. 
This field is depracted. Please use "Default + """The name of the default catalog in the metastore. This field is deprecated. Please use "Default Namespace API" to configure the default catalog for a Databricks workspace.""" workspace_id: Optional[int] = None @@ -2924,9 +2930,19 @@ class CredentialPurpose(Enum): class CredentialType(Enum): - """The type of credential.""" + """Next Id: 12""" BEARER_TOKEN = "BEARER_TOKEN" + OAUTH_ACCESS_TOKEN = "OAUTH_ACCESS_TOKEN" + OAUTH_M2M = "OAUTH_M2M" + OAUTH_REFRESH_TOKEN = "OAUTH_REFRESH_TOKEN" + OAUTH_RESOURCE_OWNER_PASSWORD = "OAUTH_RESOURCE_OWNER_PASSWORD" + OAUTH_U2M = "OAUTH_U2M" + OAUTH_U2M_MAPPING = "OAUTH_U2M_MAPPING" + OIDC_TOKEN = "OIDC_TOKEN" + PEM_PRIVATE_KEY = "PEM_PRIVATE_KEY" + SERVICE_CREDENTIAL = "SERVICE_CREDENTIAL" + UNKNOWN_CREDENTIAL_TYPE = "UNKNOWN_CREDENTIAL_TYPE" USERNAME_PASSWORD = "USERNAME_PASSWORD" @@ -2990,183 +3006,6 @@ class DataSourceFormat(Enum): WORKDAY_RAAS_FORMAT = "WORKDAY_RAAS_FORMAT" -@dataclass -class DatabaseCatalog: - name: str - """The name of the catalog in UC.""" - - database_instance_name: str - """The name of the DatabaseInstance housing the database.""" - - database_name: str - """The name of the database (in a instance) associated with the catalog.""" - - create_database_if_not_exists: Optional[bool] = None - - uid: Optional[str] = None - - def as_dict(self) -> dict: - """Serializes the DatabaseCatalog into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.create_database_if_not_exists is not None: - body["create_database_if_not_exists"] = self.create_database_if_not_exists - if self.database_instance_name is not None: - body["database_instance_name"] = self.database_instance_name - if self.database_name is not None: - body["database_name"] = self.database_name - if self.name is not None: - body["name"] = self.name - if self.uid is not None: - body["uid"] = self.uid - return body - - def as_shallow_dict(self) -> dict: - """Serializes the DatabaseCatalog into a shallow dictionary of its immediate attributes.""" - body = {} - if self.create_database_if_not_exists is not None: - body["create_database_if_not_exists"] = self.create_database_if_not_exists - if self.database_instance_name is not None: - body["database_instance_name"] = self.database_instance_name - if self.database_name is not None: - body["database_name"] = self.database_name - if self.name is not None: - body["name"] = self.name - if self.uid is not None: - body["uid"] = self.uid - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> DatabaseCatalog: - """Deserializes the DatabaseCatalog from a dictionary.""" - return cls( - create_database_if_not_exists=d.get("create_database_if_not_exists", None), - database_instance_name=d.get("database_instance_name", None), - database_name=d.get("database_name", None), - name=d.get("name", None), - uid=d.get("uid", None), - ) - - -@dataclass -class DatabaseInstance: - """A DatabaseInstance represents a logical Postgres instance, comprised of both compute and - storage.""" - - name: str - """The name of the instance. This is the unique identifier for the instance.""" - - admin_password: Optional[str] = None - """Password for admin user to create. If not provided, no user will be created.""" - - admin_rolename: Optional[str] = None - """Name of the admin role for the instance. If not provided, defaults to 'databricks_admin'.""" - - capacity: Optional[str] = None - """The sku of the instance. 
Valid values are "CU_1", "CU_2", "CU_4".""" - - creation_time: Optional[str] = None - """The timestamp when the instance was created.""" - - creator: Optional[str] = None - """The email of the creator of the instance.""" - - pg_version: Optional[str] = None - """The version of Postgres running on the instance.""" - - read_write_dns: Optional[str] = None - """The DNS endpoint to connect to the instance for read+write access.""" - - state: Optional[DatabaseInstanceState] = None - """The current state of the instance.""" - - stopped: Optional[bool] = None - """Whether the instance is stopped.""" - - uid: Optional[str] = None - """An immutable UUID identifier for the instance.""" - - def as_dict(self) -> dict: - """Serializes the DatabaseInstance into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.admin_password is not None: - body["admin_password"] = self.admin_password - if self.admin_rolename is not None: - body["admin_rolename"] = self.admin_rolename - if self.capacity is not None: - body["capacity"] = self.capacity - if self.creation_time is not None: - body["creation_time"] = self.creation_time - if self.creator is not None: - body["creator"] = self.creator - if self.name is not None: - body["name"] = self.name - if self.pg_version is not None: - body["pg_version"] = self.pg_version - if self.read_write_dns is not None: - body["read_write_dns"] = self.read_write_dns - if self.state is not None: - body["state"] = self.state.value - if self.stopped is not None: - body["stopped"] = self.stopped - if self.uid is not None: - body["uid"] = self.uid - return body - - def as_shallow_dict(self) -> dict: - """Serializes the DatabaseInstance into a shallow dictionary of its immediate attributes.""" - body = {} - if self.admin_password is not None: - body["admin_password"] = self.admin_password - if self.admin_rolename is not None: - body["admin_rolename"] = self.admin_rolename - if self.capacity is not None: - body["capacity"] = self.capacity - if self.creation_time is not None: - body["creation_time"] = self.creation_time - if self.creator is not None: - body["creator"] = self.creator - if self.name is not None: - body["name"] = self.name - if self.pg_version is not None: - body["pg_version"] = self.pg_version - if self.read_write_dns is not None: - body["read_write_dns"] = self.read_write_dns - if self.state is not None: - body["state"] = self.state - if self.stopped is not None: - body["stopped"] = self.stopped - if self.uid is not None: - body["uid"] = self.uid - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> DatabaseInstance: - """Deserializes the DatabaseInstance from a dictionary.""" - return cls( - admin_password=d.get("admin_password", None), - admin_rolename=d.get("admin_rolename", None), - capacity=d.get("capacity", None), - creation_time=d.get("creation_time", None), - creator=d.get("creator", None), - name=d.get("name", None), - pg_version=d.get("pg_version", None), - read_write_dns=d.get("read_write_dns", None), - state=_enum(d, "state", DatabaseInstanceState), - stopped=d.get("stopped", None), - uid=d.get("uid", None), - ) - - -class DatabaseInstanceState(Enum): - - AVAILABLE = "AVAILABLE" - DELETING = "DELETING" - FAILING_OVER = "FAILING_OVER" - STARTING = "STARTING" - STOPPED = "STOPPED" - UPDATING = "UPDATING" - - @dataclass class DatabricksGcpServiceAccount: """GCP long-lived credential. 
Databricks-created Google Cloud Storage service account.""" @@ -3300,42 +3139,6 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteCredentialResponse: return cls() -@dataclass -class DeleteDatabaseCatalogResponse: - def as_dict(self) -> dict: - """Serializes the DeleteDatabaseCatalogResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the DeleteDatabaseCatalogResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> DeleteDatabaseCatalogResponse: - """Deserializes the DeleteDatabaseCatalogResponse from a dictionary.""" - return cls() - - -@dataclass -class DeleteDatabaseInstanceResponse: - def as_dict(self) -> dict: - """Serializes the DeleteDatabaseInstanceResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the DeleteDatabaseInstanceResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> DeleteDatabaseInstanceResponse: - """Deserializes the DeleteDatabaseInstanceResponse from a dictionary.""" - return cls() - - @dataclass class DeleteResponse: def as_dict(self) -> dict: @@ -3354,24 +3157,6 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteResponse: return cls() -@dataclass -class DeleteSyncedDatabaseTableResponse: - def as_dict(self) -> dict: - """Serializes the DeleteSyncedDatabaseTableResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the DeleteSyncedDatabaseTableResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> DeleteSyncedDatabaseTableResponse: - """Deserializes the DeleteSyncedDatabaseTableResponse from a dictionary.""" - return cls() - - @dataclass class DeltaRuntimePropertiesKvPairs: """Properties pertaining to the current state of the delta table as given by the commit server. @@ -3400,6 +3185,12 @@ def from_dict(cls, d: Dict[str, Any]) -> DeltaRuntimePropertiesKvPairs: return cls(delta_runtime_properties=d.get("delta_runtime_properties", None)) +class DeltaSharingScopeEnum(Enum): + + INTERNAL = "INTERNAL" + INTERNAL_AND_EXTERNAL = "INTERNAL_AND_EXTERNAL" + + @dataclass class Dependency: """A dependency of a SQL object. Either the __table__ field or the __function__ field must be @@ -3484,12 +3275,18 @@ def from_dict(cls, d: Dict[str, Any]) -> DisableResponse: @dataclass class EffectivePermissionsList: + next_page_token: Optional[str] = None + """Opaque token to retrieve the next page of results. Absent if there are no more pages. 
+ __page_token__ should be set to this value for the next request (for the next page of results).""" + privilege_assignments: Optional[List[EffectivePrivilegeAssignment]] = None """The privileges conveyed to each principal (either directly or via inheritance)""" def as_dict(self) -> dict: """Serializes the EffectivePermissionsList into a dictionary suitable for use as a JSON request body.""" body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token if self.privilege_assignments: body["privilege_assignments"] = [v.as_dict() for v in self.privilege_assignments] return body @@ -3497,6 +3294,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the EffectivePermissionsList into a shallow dictionary of its immediate attributes.""" body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token if self.privilege_assignments: body["privilege_assignments"] = self.privilege_assignments return body @@ -3504,7 +3303,10 @@ def as_shallow_dict(self) -> dict: @classmethod def from_dict(cls, d: Dict[str, Any]) -> EffectivePermissionsList: """Deserializes the EffectivePermissionsList from a dictionary.""" - return cls(privilege_assignments=_repeated_dict(d, "privilege_assignments", EffectivePrivilegeAssignment)) + return cls( + next_page_token=d.get("next_page_token", None), + privilege_assignments=_repeated_dict(d, "privilege_assignments", EffectivePrivilegeAssignment), + ) @dataclass @@ -4847,7 +4649,7 @@ class GetMetastoreSummaryResponse: delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None """The lifetime of delta sharing recipient token in seconds.""" - delta_sharing_scope: Optional[GetMetastoreSummaryResponseDeltaSharingScope] = None + delta_sharing_scope: Optional[DeltaSharingScopeEnum] = None """The scope of Delta Sharing enabled for the metastore.""" external_access_enabled: Optional[bool] = None @@ -4988,7 +4790,7 @@ def from_dict(cls, d: Dict[str, Any]) -> GetMetastoreSummaryResponse: delta_sharing_recipient_token_lifetime_in_seconds=d.get( "delta_sharing_recipient_token_lifetime_in_seconds", None ), - delta_sharing_scope=_enum(d, "delta_sharing_scope", GetMetastoreSummaryResponseDeltaSharingScope), + delta_sharing_scope=_enum(d, "delta_sharing_scope", DeltaSharingScopeEnum), external_access_enabled=d.get("external_access_enabled", None), global_metastore_id=d.get("global_metastore_id", None), metastore_id=d.get("metastore_id", None), @@ -5004,11 +4806,40 @@ def from_dict(cls, d: Dict[str, Any]) -> GetMetastoreSummaryResponse: ) -class GetMetastoreSummaryResponseDeltaSharingScope(Enum): - """The scope of Delta Sharing enabled for the metastore.""" +@dataclass +class GetPermissionsResponse: + next_page_token: Optional[str] = None + """Opaque token to retrieve the next page of results. Absent if there are no more pages. 
+ __page_token__ should be set to this value for the next request (for the next page of results).""" - INTERNAL = "INTERNAL" - INTERNAL_AND_EXTERNAL = "INTERNAL_AND_EXTERNAL" + privilege_assignments: Optional[List[PrivilegeAssignment]] = None + """The privileges assigned to each principal""" + + def as_dict(self) -> dict: + """Serializes the GetPermissionsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.privilege_assignments: + body["privilege_assignments"] = [v.as_dict() for v in self.privilege_assignments] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GetPermissionsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.privilege_assignments: + body["privilege_assignments"] = self.privilege_assignments + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GetPermissionsResponse: + """Deserializes the GetPermissionsResponse from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + privilege_assignments=_repeated_dict(d, "privilege_assignments", PrivilegeAssignment), + ) @dataclass @@ -5230,41 +5061,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ListCredentialsResponse: ) -@dataclass -class ListDatabaseInstancesResponse: - database_instances: Optional[List[DatabaseInstance]] = None - """List of instances.""" - - next_page_token: Optional[str] = None - """Pagination token to request the next page of instances.""" - - def as_dict(self) -> dict: - """Serializes the ListDatabaseInstancesResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.database_instances: - body["database_instances"] = [v.as_dict() for v in self.database_instances] - if self.next_page_token is not None: - body["next_page_token"] = self.next_page_token - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ListDatabaseInstancesResponse into a shallow dictionary of its immediate attributes.""" - body = {} - if self.database_instances: - body["database_instances"] = self.database_instances - if self.next_page_token is not None: - body["next_page_token"] = self.next_page_token - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ListDatabaseInstancesResponse: - """Deserializes the ListDatabaseInstancesResponse from a dictionary.""" - return cls( - database_instances=_repeated_dict(d, "database_instances", DatabaseInstance), - next_page_token=d.get("next_page_token", None), - ) - - @dataclass class ListExternalLocationsResponse: external_locations: Optional[List[ExternalLocationInfo]] = None @@ -5341,11 +5137,17 @@ class ListMetastoresResponse: metastores: Optional[List[MetastoreInfo]] = None """An array of metastore information objects.""" + next_page_token: Optional[str] = None + """Opaque token to retrieve the next page of results. Absent if there are no more pages. 
+ __page_token__ should be set to this value for the next request (for the next page of results).""" + def as_dict(self) -> dict: """Serializes the ListMetastoresResponse into a dictionary suitable for use as a JSON request body.""" body = {} if self.metastores: body["metastores"] = [v.as_dict() for v in self.metastores] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token return body def as_shallow_dict(self) -> dict: @@ -5353,12 +5155,16 @@ def as_shallow_dict(self) -> dict: body = {} if self.metastores: body["metastores"] = self.metastores + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> ListMetastoresResponse: """Deserializes the ListMetastoresResponse from a dictionary.""" - return cls(metastores=_repeated_dict(d, "metastores", MetastoreInfo)) + return cls( + metastores=_repeated_dict(d, "metastores", MetastoreInfo), next_page_token=d.get("next_page_token", None) + ) @dataclass @@ -5674,12 +5480,12 @@ class MatchType(Enum): @dataclass class MetastoreAssignment: - metastore_id: str - """The unique ID of the metastore.""" - workspace_id: int """The unique ID of the Databricks workspace.""" + metastore_id: str + """The unique ID of the metastore.""" + default_catalog_name: Optional[str] = None """The name of the default catalog in the metastore.""" @@ -5736,7 +5542,7 @@ class MetastoreInfo: delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None """The lifetime of delta sharing recipient token in seconds.""" - delta_sharing_scope: Optional[MetastoreInfoDeltaSharingScope] = None + delta_sharing_scope: Optional[DeltaSharingScopeEnum] = None """The scope of Delta Sharing enabled for the metastore.""" external_access_enabled: Optional[bool] = None @@ -5877,7 +5683,7 @@ def from_dict(cls, d: Dict[str, Any]) -> MetastoreInfo: delta_sharing_recipient_token_lifetime_in_seconds=d.get( "delta_sharing_recipient_token_lifetime_in_seconds", None ), - delta_sharing_scope=_enum(d, "delta_sharing_scope", MetastoreInfoDeltaSharingScope), + delta_sharing_scope=_enum(d, "delta_sharing_scope", DeltaSharingScopeEnum), external_access_enabled=d.get("external_access_enabled", None), global_metastore_id=d.get("global_metastore_id", None), metastore_id=d.get("metastore_id", None), @@ -5893,13 +5699,6 @@ def from_dict(cls, d: Dict[str, Any]) -> MetastoreInfo: ) -class MetastoreInfoDeltaSharingScope(Enum): - """The scope of Delta Sharing enabled for the metastore.""" - - INTERNAL = "INTERNAL" - INTERNAL_AND_EXTERNAL = "INTERNAL_AND_EXTERNAL" - - @dataclass class ModelVersionInfo: aliases: Optional[List[RegisteredModelAlias]] = None @@ -6769,43 +6568,6 @@ def from_dict(cls, d: Dict[str, Any]) -> NamedTableConstraint: return cls(name=d.get("name", None)) -@dataclass -class NewPipelineSpec: - """Custom fields that user can set for pipeline while creating SyncedDatabaseTable. Note that other - fields of pipeline are still inferred by table def internally""" - - storage_catalog: Optional[str] = None - """UC catalog for the pipeline to store intermediate files (checkpoints, event logs etc). This - needs to be a standard catalog where the user has permissions to create Delta tables.""" - - storage_schema: Optional[str] = None - """UC schema for the pipeline to store intermediate files (checkpoints, event logs etc). 
This needs - to be in the standard catalog where the user has permissions to create Delta tables.""" - - def as_dict(self) -> dict: - """Serializes the NewPipelineSpec into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.storage_catalog is not None: - body["storage_catalog"] = self.storage_catalog - if self.storage_schema is not None: - body["storage_schema"] = self.storage_schema - return body - - def as_shallow_dict(self) -> dict: - """Serializes the NewPipelineSpec into a shallow dictionary of its immediate attributes.""" - body = {} - if self.storage_catalog is not None: - body["storage_catalog"] = self.storage_catalog - if self.storage_schema is not None: - body["storage_schema"] = self.storage_schema - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> NewPipelineSpec: - """Deserializes the NewPipelineSpec from a dictionary.""" - return cls(storage_catalog=d.get("storage_catalog", None), storage_schema=d.get("storage_schema", None)) - - @dataclass class OnlineTable: """Online Table information.""" @@ -7120,31 +6882,6 @@ def from_dict(cls, d: Dict[str, Any]) -> PermissionsChange: ) -@dataclass -class PermissionsList: - privilege_assignments: Optional[List[PrivilegeAssignment]] = None - """The privileges assigned to each principal""" - - def as_dict(self) -> dict: - """Serializes the PermissionsList into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.privilege_assignments: - body["privilege_assignments"] = [v.as_dict() for v in self.privilege_assignments] - return body - - def as_shallow_dict(self) -> dict: - """Serializes the PermissionsList into a shallow dictionary of its immediate attributes.""" - body = {} - if self.privilege_assignments: - body["privilege_assignments"] = self.privilege_assignments - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> PermissionsList: - """Deserializes the PermissionsList from a dictionary.""" - return cls(privilege_assignments=_repeated_dict(d, "privilege_assignments", PrivilegeAssignment)) - - @dataclass class PipelineProgress: """Progress information of the Online Table data synchronization pipeline.""" @@ -7335,9 +7072,6 @@ def from_dict(cls, d: Dict[str, Any]) -> PrivilegeAssignment: return cls(principal=d.get("principal", None), privileges=_repeated_enum(d, "privileges", Privilege)) -PropertiesKvPairs = Dict[str, str] - - @dataclass class ProvisioningInfo: """Status of an asynchronously provisioned resource.""" @@ -7750,6 +7484,8 @@ def from_dict(cls, d: Dict[str, Any]) -> RegisteredModelInfo: @dataclass class SchemaInfo: + """Next ID: 40""" + browse_only: Optional[bool] = None """Indicates whether the principal is limited to retrieving metadata for the associated object through the BROWSE privilege when include_browse is enabled in the request.""" @@ -7757,7 +7493,7 @@ class SchemaInfo: catalog_name: Optional[str] = None """Name of parent catalog.""" - catalog_type: Optional[str] = None + catalog_type: Optional[CatalogType] = None """The type of the parent catalog.""" comment: Optional[str] = None @@ -7772,6 +7508,7 @@ class SchemaInfo: effective_predictive_optimization_flag: Optional[EffectivePredictiveOptimizationFlag] = None enable_predictive_optimization: Optional[EnablePredictiveOptimization] = None + """Whether predictive optimization should be enabled for this object and objects under it.""" full_name: Optional[str] = None """Full name of schema, in form of __catalog_name__.__schema_name__.""" @@ -7811,7 +7548,7 @@ def 
as_dict(self) -> dict: if self.catalog_name is not None: body["catalog_name"] = self.catalog_name if self.catalog_type is not None: - body["catalog_type"] = self.catalog_type + body["catalog_type"] = self.catalog_type.value if self.comment is not None: body["comment"] = self.comment if self.created_at is not None: @@ -7891,7 +7628,7 @@ def from_dict(cls, d: Dict[str, Any]) -> SchemaInfo: return cls( browse_only=d.get("browse_only", None), catalog_name=d.get("catalog_name", None), - catalog_type=d.get("catalog_type", None), + catalog_type=_enum(d, "catalog_type", CatalogType), comment=d.get("comment", None), created_at=d.get("created_at", None), created_by=d.get("created_by", None), @@ -7912,12 +7649,6 @@ def from_dict(cls, d: Dict[str, Any]) -> SchemaInfo: ) -SecurableOptionsMap = Dict[str, str] - - -SecurablePropertiesMap = Dict[str, str] - - class SecurableType(Enum): """The type of Unity Catalog securable.""" @@ -8249,184 +7980,6 @@ def from_dict(cls, d: Dict[str, Any]) -> StorageCredentialInfo: ) -@dataclass -class SyncedDatabaseTable: - """Next field marker: 10""" - - name: str - """Full three-part (catalog, schema, table) name of the table.""" - - data_synchronization_status: Optional[OnlineTableStatus] = None - """Synced Table data synchronization status""" - - database_instance_name: Optional[str] = None - """Name of the target database instance. This is required when creating synced database tables in - standard catalogs. This is optional when creating synced database tables in registered catalogs. - If this field is specified when creating synced database tables in registered catalogs, the - database instance name MUST match that of the registered catalog (or the request will be - rejected).""" - - logical_database_name: Optional[str] = None - """Target Postgres database object (logical database) name for this table. This field is optional - in all scenarios. - - When creating a synced table in a registered Postgres catalog, the target Postgres database name - is inferred to be that of the registered catalog. If this field is specified in this scenario, - the Postgres database name MUST match that of the registered catalog (or the request will be - rejected). - - When creating a synced table in a standard catalog, the target database name is inferred to be - that of the standard catalog. In this scenario, specifying this field will allow targeting an - arbitrary postgres database.""" - - spec: Optional[SyncedTableSpec] = None - """Specification of a synced database table.""" - - table_serving_url: Optional[str] = None - """Data serving REST API URL for this table""" - - unity_catalog_provisioning_state: Optional[ProvisioningInfoState] = None - """The provisioning state of the synced table entity in Unity Catalog. This is distinct from the - state of the data synchronization pipeline (i.e. 
the table may be in "ACTIVE" but the pipeline - may be in "PROVISIONING" as it runs asynchronously).""" - - def as_dict(self) -> dict: - """Serializes the SyncedDatabaseTable into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.data_synchronization_status: - body["data_synchronization_status"] = self.data_synchronization_status.as_dict() - if self.database_instance_name is not None: - body["database_instance_name"] = self.database_instance_name - if self.logical_database_name is not None: - body["logical_database_name"] = self.logical_database_name - if self.name is not None: - body["name"] = self.name - if self.spec: - body["spec"] = self.spec.as_dict() - if self.table_serving_url is not None: - body["table_serving_url"] = self.table_serving_url - if self.unity_catalog_provisioning_state is not None: - body["unity_catalog_provisioning_state"] = self.unity_catalog_provisioning_state.value - return body - - def as_shallow_dict(self) -> dict: - """Serializes the SyncedDatabaseTable into a shallow dictionary of its immediate attributes.""" - body = {} - if self.data_synchronization_status: - body["data_synchronization_status"] = self.data_synchronization_status - if self.database_instance_name is not None: - body["database_instance_name"] = self.database_instance_name - if self.logical_database_name is not None: - body["logical_database_name"] = self.logical_database_name - if self.name is not None: - body["name"] = self.name - if self.spec: - body["spec"] = self.spec - if self.table_serving_url is not None: - body["table_serving_url"] = self.table_serving_url - if self.unity_catalog_provisioning_state is not None: - body["unity_catalog_provisioning_state"] = self.unity_catalog_provisioning_state - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> SyncedDatabaseTable: - """Deserializes the SyncedDatabaseTable from a dictionary.""" - return cls( - data_synchronization_status=_from_dict(d, "data_synchronization_status", OnlineTableStatus), - database_instance_name=d.get("database_instance_name", None), - logical_database_name=d.get("logical_database_name", None), - name=d.get("name", None), - spec=_from_dict(d, "spec", SyncedTableSpec), - table_serving_url=d.get("table_serving_url", None), - unity_catalog_provisioning_state=_enum(d, "unity_catalog_provisioning_state", ProvisioningInfoState), - ) - - -class SyncedTableSchedulingPolicy(Enum): - - CONTINUOUS = "CONTINUOUS" - SNAPSHOT = "SNAPSHOT" - TRIGGERED = "TRIGGERED" - - -@dataclass -class SyncedTableSpec: - """Specification of a synced database table.""" - - create_database_objects_if_missing: Optional[bool] = None - """If true, the synced table's logical database and schema resources in PG will be created if they - do not already exist.""" - - new_pipeline_spec: Optional[NewPipelineSpec] = None - """Spec of new pipeline. Should be empty if pipeline_id is set""" - - pipeline_id: Optional[str] = None - """ID of the associated pipeline. 
Should be empty if new_pipeline_spec is set""" - - primary_key_columns: Optional[List[str]] = None - """Primary Key columns to be used for data insert/update in the destination.""" - - scheduling_policy: Optional[SyncedTableSchedulingPolicy] = None - """Scheduling policy of the underlying pipeline.""" - - source_table_full_name: Optional[str] = None - """Three-part (catalog, schema, table) name of the source Delta table.""" - - timeseries_key: Optional[str] = None - """Time series key to deduplicate (tie-break) rows with the same primary key.""" - - def as_dict(self) -> dict: - """Serializes the SyncedTableSpec into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.create_database_objects_if_missing is not None: - body["create_database_objects_if_missing"] = self.create_database_objects_if_missing - if self.new_pipeline_spec: - body["new_pipeline_spec"] = self.new_pipeline_spec.as_dict() - if self.pipeline_id is not None: - body["pipeline_id"] = self.pipeline_id - if self.primary_key_columns: - body["primary_key_columns"] = [v for v in self.primary_key_columns] - if self.scheduling_policy is not None: - body["scheduling_policy"] = self.scheduling_policy.value - if self.source_table_full_name is not None: - body["source_table_full_name"] = self.source_table_full_name - if self.timeseries_key is not None: - body["timeseries_key"] = self.timeseries_key - return body - - def as_shallow_dict(self) -> dict: - """Serializes the SyncedTableSpec into a shallow dictionary of its immediate attributes.""" - body = {} - if self.create_database_objects_if_missing is not None: - body["create_database_objects_if_missing"] = self.create_database_objects_if_missing - if self.new_pipeline_spec: - body["new_pipeline_spec"] = self.new_pipeline_spec - if self.pipeline_id is not None: - body["pipeline_id"] = self.pipeline_id - if self.primary_key_columns: - body["primary_key_columns"] = self.primary_key_columns - if self.scheduling_policy is not None: - body["scheduling_policy"] = self.scheduling_policy - if self.source_table_full_name is not None: - body["source_table_full_name"] = self.source_table_full_name - if self.timeseries_key is not None: - body["timeseries_key"] = self.timeseries_key - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> SyncedTableSpec: - """Deserializes the SyncedTableSpec from a dictionary.""" - return cls( - create_database_objects_if_missing=d.get("create_database_objects_if_missing", None), - new_pipeline_spec=_from_dict(d, "new_pipeline_spec", NewPipelineSpec), - pipeline_id=d.get("pipeline_id", None), - primary_key_columns=d.get("primary_key_columns", None), - scheduling_policy=_enum(d, "scheduling_policy", SyncedTableSchedulingPolicy), - source_table_full_name=d.get("source_table_full_name", None), - timeseries_key=d.get("timeseries_key", None), - ) - - @dataclass class SystemSchemaInfo: schema: str @@ -9543,7 +9096,7 @@ class UpdateMetastore: delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None """The lifetime of delta sharing recipient token in seconds.""" - delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope] = None + delta_sharing_scope: Optional[DeltaSharingScopeEnum] = None """The scope of Delta Sharing enabled for the metastore.""" id: Optional[str] = None @@ -9615,7 +9168,7 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateMetastore: delta_sharing_recipient_token_lifetime_in_seconds=d.get( "delta_sharing_recipient_token_lifetime_in_seconds", None ), - delta_sharing_scope=_enum(d, 
"delta_sharing_scope", UpdateMetastoreDeltaSharingScope), + delta_sharing_scope=_enum(d, "delta_sharing_scope", DeltaSharingScopeEnum), id=d.get("id", None), new_name=d.get("new_name", None), owner=d.get("owner", None), @@ -9627,7 +9180,7 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateMetastore: @dataclass class UpdateMetastoreAssignment: default_catalog_name: Optional[str] = None - """The name of the default catalog in the metastore. This field is depracted. Please use "Default + """The name of the default catalog in the metastore. This field is deprecated. Please use "Default Namespace API" to configure the default catalog for a Databricks workspace.""" metastore_id: Optional[str] = None @@ -9668,13 +9221,6 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateMetastoreAssignment: ) -class UpdateMetastoreDeltaSharingScope(Enum): - """The scope of Delta Sharing enabled for the metastore.""" - - INTERNAL = "INTERNAL" - INTERNAL_AND_EXTERNAL = "INTERNAL_AND_EXTERNAL" - - @dataclass class UpdateModelVersionRequest: comment: Optional[str] = None @@ -9843,7 +9389,7 @@ class UpdatePermissions: full_name: Optional[str] = None """Full name of securable.""" - securable_type: Optional[SecurableType] = None + securable_type: Optional[str] = None """Type of securable.""" def as_dict(self) -> dict: @@ -9854,7 +9400,7 @@ def as_dict(self) -> dict: if self.full_name is not None: body["full_name"] = self.full_name if self.securable_type is not None: - body["securable_type"] = self.securable_type.value + body["securable_type"] = self.securable_type return body def as_shallow_dict(self) -> dict: @@ -9874,10 +9420,35 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdatePermissions: return cls( changes=_repeated_dict(d, "changes", PermissionsChange), full_name=d.get("full_name", None), - securable_type=_enum(d, "securable_type", SecurableType), + securable_type=d.get("securable_type", None), ) +@dataclass +class UpdatePermissionsResponse: + privilege_assignments: Optional[List[PrivilegeAssignment]] = None + """The privileges assigned to each principal""" + + def as_dict(self) -> dict: + """Serializes the UpdatePermissionsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.privilege_assignments: + body["privilege_assignments"] = [v.as_dict() for v in self.privilege_assignments] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdatePermissionsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.privilege_assignments: + body["privilege_assignments"] = self.privilege_assignments + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdatePermissionsResponse: + """Deserializes the UpdatePermissionsResponse from a dictionary.""" + return cls(privilege_assignments=_repeated_dict(d, "privilege_assignments", PrivilegeAssignment)) + + @dataclass class UpdateRegisteredModelRequest: comment: Optional[str] = None @@ -9953,6 +9524,7 @@ class UpdateSchema: """User-provided free-form text description.""" enable_predictive_optimization: Optional[EnablePredictiveOptimization] = None + """Whether predictive optimization should be enabled for this object and objects under it.""" full_name: Optional[str] = None """Full name of the schema.""" @@ -10137,6 +9709,39 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateStorageCredential: ) +@dataclass +class UpdateTableRequest: + """Update a table owner.""" + + full_name: Optional[str] = None + """Full name of the table.""" + + owner: Optional[str] = None + + def 
as_dict(self) -> dict: + """Serializes the UpdateTableRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.full_name is not None: + body["full_name"] = self.full_name + if self.owner is not None: + body["owner"] = self.owner + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateTableRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.full_name is not None: + body["full_name"] = self.full_name + if self.owner is not None: + body["owner"] = self.owner + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateTableRequest: + """Deserializes the UpdateTableRequest from a dictionary.""" + return cls(full_name=d.get("full_name", None), owner=d.get("owner", None)) + + @dataclass class UpdateVolumeRequestContent: comment: Optional[str] = None @@ -11586,7 +11191,7 @@ def create( :param comment: str (optional) User-provided free-form text description. :param properties: Dict[str,str] (optional) - An object containing map of key-value properties attached to the connection. + A map of key-value properties attached to the securable. :param read_only: bool (optional) If the connection is read only. @@ -11673,8 +11278,6 @@ def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/connections", query=query, headers=headers) if "connections" in json: @@ -12086,241 +11689,6 @@ def validate_credential( return ValidateCredentialResponse.from_dict(res) -class DatabaseInstancesAPI: - """Database Instances provide access to a database via REST API or direct SQL.""" - - def __init__(self, api_client): - self._api = api_client - - def create_database_catalog(self, catalog: DatabaseCatalog) -> DatabaseCatalog: - """Create a Database Catalog. - - :param catalog: :class:`DatabaseCatalog` - - :returns: :class:`DatabaseCatalog` - """ - body = catalog.as_dict() - headers = { - "Accept": "application/json", - "Content-Type": "application/json", - } - - res = self._api.do("POST", "/api/2.0/database/catalogs", body=body, headers=headers) - return DatabaseCatalog.from_dict(res) - - def create_database_instance(self, database_instance: DatabaseInstance) -> DatabaseInstance: - """Create a Database Instance. - - :param database_instance: :class:`DatabaseInstance` - A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. - - :returns: :class:`DatabaseInstance` - """ - body = database_instance.as_dict() - headers = { - "Accept": "application/json", - "Content-Type": "application/json", - } - - res = self._api.do("POST", "/api/2.0/database/instances", body=body, headers=headers) - return DatabaseInstance.from_dict(res) - - def create_synced_database_table(self, synced_table: SyncedDatabaseTable) -> SyncedDatabaseTable: - """Create a Synced Database Table. - - :param synced_table: :class:`SyncedDatabaseTable` - Next field marker: 10 - - :returns: :class:`SyncedDatabaseTable` - """ - body = synced_table.as_dict() - headers = { - "Accept": "application/json", - "Content-Type": "application/json", - } - - res = self._api.do("POST", "/api/2.0/database/synced_tables", body=body, headers=headers) - return SyncedDatabaseTable.from_dict(res) - - def delete_database_catalog(self, name: str): - """Delete a Database Catalog. 
- - :param name: str - - - """ - - headers = { - "Accept": "application/json", - } - - self._api.do("DELETE", f"/api/2.0/database/catalogs/{name}", headers=headers) - - def delete_database_instance(self, name: str, *, force: Optional[bool] = None, purge: Optional[bool] = None): - """Delete a Database Instance. - - :param name: str - Name of the instance to delete. - :param force: bool (optional) - By default, a instance cannot be deleted if it has descendant instances created via PITR. If this - flag is specified as true, all descendent instances will be deleted as well. - :param purge: bool (optional) - If false, the database instance is soft deleted. Soft deleted instances behave as if they are - deleted, and cannot be used for CRUD operations nor connected to. However they can be undeleted by - calling the undelete API for a limited time. If true, the database instance is hard deleted and - cannot be undeleted. - - - """ - - query = {} - if force is not None: - query["force"] = force - if purge is not None: - query["purge"] = purge - headers = { - "Accept": "application/json", - } - - self._api.do("DELETE", f"/api/2.0/database/instances/{name}", query=query, headers=headers) - - def delete_synced_database_table(self, name: str): - """Delete a Synced Database Table. - - :param name: str - - - """ - - headers = { - "Accept": "application/json", - } - - self._api.do("DELETE", f"/api/2.0/database/synced_tables/{name}", headers=headers) - - def find_database_instance_by_uid(self, *, uid: Optional[str] = None) -> DatabaseInstance: - """Find a Database Instance by uid. - - :param uid: str (optional) - UID of the cluster to get. - - :returns: :class:`DatabaseInstance` - """ - - query = {} - if uid is not None: - query["uid"] = uid - headers = { - "Accept": "application/json", - } - - res = self._api.do("GET", "/api/2.0/database/instances:findByUid", query=query, headers=headers) - return DatabaseInstance.from_dict(res) - - def get_database_catalog(self, name: str) -> DatabaseCatalog: - """Get a Database Catalog. - - :param name: str - - :returns: :class:`DatabaseCatalog` - """ - - headers = { - "Accept": "application/json", - } - - res = self._api.do("GET", f"/api/2.0/database/catalogs/{name}", headers=headers) - return DatabaseCatalog.from_dict(res) - - def get_database_instance(self, name: str) -> DatabaseInstance: - """Get a Database Instance. - - :param name: str - Name of the cluster to get. - - :returns: :class:`DatabaseInstance` - """ - - headers = { - "Accept": "application/json", - } - - res = self._api.do("GET", f"/api/2.0/database/instances/{name}", headers=headers) - return DatabaseInstance.from_dict(res) - - def get_synced_database_table(self, name: str) -> SyncedDatabaseTable: - """Get a Synced Database Table. - - :param name: str - - :returns: :class:`SyncedDatabaseTable` - """ - - headers = { - "Accept": "application/json", - } - - res = self._api.do("GET", f"/api/2.0/database/synced_tables/{name}", headers=headers) - return SyncedDatabaseTable.from_dict(res) - - def list_database_instances( - self, *, page_size: Optional[int] = None, page_token: Optional[str] = None - ) -> Iterator[DatabaseInstance]: - """List Database Instances. - - :param page_size: int (optional) - Upper bound for items returned. - :param page_token: str (optional) - Pagination token to go to the next page of Database Instances. Requests first page if absent. 
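These database classes and the `DatabaseInstancesAPI` are being removed from `catalog.py`; they now ship in the separate `databricks.sdk.service.database` module exposed as the `w.database` workspace-level service. A hedged sketch of the equivalent calls after the move, assuming the method and field names carry over unchanged from the removed API:

```python
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import database

w = WorkspaceClient()

# Assumed to mirror the removed catalog-based surface: the same operations are
# now reached through w.database instead of a catalog-level API.
instance = w.database.create_database_instance(
    database.DatabaseInstance(name="my-pg-instance", capacity="CU_1")
)

# list_database_instances() is likewise assumed to remain a paginated iterator.
for inst in w.database.list_database_instances():
    print(inst.name, inst.state)
```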
- - :returns: Iterator over :class:`DatabaseInstance` - """ - - query = {} - if page_size is not None: - query["page_size"] = page_size - if page_token is not None: - query["page_token"] = page_token - headers = { - "Accept": "application/json", - } - - while True: - json = self._api.do("GET", "/api/2.0/database/instances", query=query, headers=headers) - if "database_instances" in json: - for v in json["database_instances"]: - yield DatabaseInstance.from_dict(v) - if "next_page_token" not in json or not json["next_page_token"]: - return - query["page_token"] = json["next_page_token"] - - def update_database_instance( - self, name: str, database_instance: DatabaseInstance, update_mask: str - ) -> DatabaseInstance: - """Update a Database Instance. - - :param name: str - The name of the instance. This is the unique identifier for the instance. - :param database_instance: :class:`DatabaseInstance` - A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. - :param update_mask: str - The list of fields to update. - - :returns: :class:`DatabaseInstance` - """ - body = database_instance.as_dict() - query = {} - if update_mask is not None: - query["update_mask"] = update_mask - headers = { - "Accept": "application/json", - "Content-Type": "application/json", - } - - res = self._api.do("PATCH", f"/api/2.0/database/instances/{name}", query=query, body=body, headers=headers) - return DatabaseInstance.from_dict(res) - - class ExternalLocationsAPI: """An external location is an object that combines a cloud storage path with a storage credential that authorizes access to the cloud storage path. Each external location is subject to Unity Catalog @@ -12794,22 +12162,46 @@ class GrantsAPI: def __init__(self, api_client): self._api = api_client - def get(self, securable_type: SecurableType, full_name: str, *, principal: Optional[str] = None) -> PermissionsList: + def get( + self, + securable_type: str, + full_name: str, + *, + max_results: Optional[int] = None, + page_token: Optional[str] = None, + principal: Optional[str] = None, + ) -> GetPermissionsResponse: """Get permissions. - Gets the permissions for a securable. + Gets the permissions for a securable. Does not include inherited permissions. - :param securable_type: :class:`SecurableType` + :param securable_type: str Type of securable. :param full_name: str Full name of securable. + :param max_results: int (optional) + Specifies the maximum number of privileges to return (page length). Every PrivilegeAssignment + present in a single page response is guaranteed to contain all the privileges granted on the + requested Securable for the respective principal. + + If not set, all the permissions are returned. If set to - lesser than 0: invalid parameter error - + 0: page length is set to a server configured value - lesser than 150 but greater than 0: invalid + parameter error (this is to ensure that server is able to return at least one complete + PrivilegeAssignment in a single page response) - greater than (or equal to) 150: page length is the + minimum of this value and a server configured value + :param page_token: str (optional) + Opaque pagination token to go to next page based on previous query. :param principal: str (optional) If provided, only the permissions for the specified principal (user or group) are returned. 
- :returns: :class:`PermissionsList` + :returns: :class:`GetPermissionsResponse` """ query = {} + if max_results is not None: + query["max_results"] = max_results + if page_token is not None: + query["page_token"] = page_token if principal is not None: query["principal"] = principal headers = { @@ -12817,24 +12209,41 @@ def get(self, securable_type: SecurableType, full_name: str, *, principal: Optio } res = self._api.do( - "GET", - f"/api/2.1/unity-catalog/permissions/{securable_type.value}/{full_name}", - query=query, - headers=headers, + "GET", f"/api/2.1/unity-catalog/permissions/{securable_type}/{full_name}", query=query, headers=headers ) - return PermissionsList.from_dict(res) + return GetPermissionsResponse.from_dict(res) def get_effective( - self, securable_type: SecurableType, full_name: str, *, principal: Optional[str] = None + self, + securable_type: str, + full_name: str, + *, + max_results: Optional[int] = None, + page_token: Optional[str] = None, + principal: Optional[str] = None, ) -> EffectivePermissionsList: """Get effective permissions. - Gets the effective permissions for a securable. + Gets the effective permissions for a securable. Includes inherited permissions from any parent + securables. - :param securable_type: :class:`SecurableType` + :param securable_type: str Type of securable. :param full_name: str Full name of securable. + :param max_results: int (optional) + Specifies the maximum number of privileges to return (page length). Every + EffectivePrivilegeAssignment present in a single page response is guaranteed to contain all the + effective privileges granted on (or inherited by) the requested Securable for the respective + principal. + + If not set, all the effective permissions are returned. If set to - lesser than 0: invalid parameter + error - 0: page length is set to a server configured value - lesser than 150 but greater than 0: + invalid parameter error (this is to ensure that server is able to return at least one complete + EffectivePrivilegeAssignment in a single page response) - greater than (or equal to) 150: page + length is the minimum of this value and a server configured value + :param page_token: str (optional) + Opaque token for the next page of results (pagination). :param principal: str (optional) If provided, only the effective permissions for the specified principal (user or group) are returned. @@ -12843,6 +12252,10 @@ def get_effective( """ query = {} + if max_results is not None: + query["max_results"] = max_results + if page_token is not None: + query["page_token"] = page_token if principal is not None: query["principal"] = principal headers = { @@ -12851,27 +12264,27 @@ def get_effective( res = self._api.do( "GET", - f"/api/2.1/unity-catalog/effective-permissions/{securable_type.value}/{full_name}", + f"/api/2.1/unity-catalog/effective-permissions/{securable_type}/{full_name}", query=query, headers=headers, ) return EffectivePermissionsList.from_dict(res) def update( - self, securable_type: SecurableType, full_name: str, *, changes: Optional[List[PermissionsChange]] = None - ) -> PermissionsList: + self, securable_type: str, full_name: str, *, changes: Optional[List[PermissionsChange]] = None + ) -> UpdatePermissionsResponse: """Update permissions. Updates the permissions for a securable. - :param securable_type: :class:`SecurableType` + :param securable_type: str Type of securable. :param full_name: str Full name of securable. :param changes: List[:class:`PermissionsChange`] (optional) Array of permissions change objects. 
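With `securable_type` now a plain string and the new `max_results`/`page_token` parameters, a grants lookup might look like the sketch below; the securable names are placeholders and the `w.grants` accessor is assumed from the existing SDK surface:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# securable_type is passed as a plain string rather than a SecurableType enum
# value; max_results=150 requests a single page capped per the documented
# minimum/maximum behaviour.
direct = w.grants.get("table", "main.default.my_table", max_results=150)
for assignment in direct.privilege_assignments or []:
    print(assignment.principal, assignment.privileges)

# get_effective() additionally includes permissions inherited from parent
# securables (the enclosing schema and catalog in this case).
effective = w.grants.get_effective("table", "main.default.my_table")
```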
- :returns: :class:`PermissionsList` + :returns: :class:`UpdatePermissionsResponse` """ body = {} if changes is not None: @@ -12882,12 +12295,9 @@ def update( } res = self._api.do( - "PATCH", - f"/api/2.1/unity-catalog/permissions/{securable_type.value}/{full_name}", - body=body, - headers=headers, + "PATCH", f"/api/2.1/unity-catalog/permissions/{securable_type}/{full_name}", body=body, headers=headers ) - return PermissionsList.from_dict(res) + return UpdatePermissionsResponse.from_dict(res) class MetastoresAPI: @@ -12918,7 +12328,7 @@ def assign(self, workspace_id: int, metastore_id: str, default_catalog_name: str :param metastore_id: str The unique ID of the metastore. :param default_catalog_name: str - The name of the default catalog in the metastore. This field is depracted. Please use "Default + The name of the default catalog in the metastore. This field is deprecated. Please use "Default Namespace API" to configure the default catalog for a Databricks workspace. @@ -12946,9 +12356,7 @@ def create(self, name: str, *, region: Optional[str] = None, storage_root: Optio :param name: str The user-specified name of the metastore. :param region: str (optional) - Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). The field can be omitted in - the __workspace-level__ __API__ but not in the __account-level__ __API__. If this field is omitted, - the region of the workspace receiving the request will be used. + Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). :param storage_root: str (optional) The storage root URL for metastore @@ -13025,22 +12433,43 @@ def get(self, id: str) -> MetastoreInfo: res = self._api.do("GET", f"/api/2.1/unity-catalog/metastores/{id}", headers=headers) return MetastoreInfo.from_dict(res) - def list(self) -> Iterator[MetastoreInfo]: + def list(self, *, max_results: Optional[int] = None, page_token: Optional[str] = None) -> Iterator[MetastoreInfo]: """List metastores. Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. There is no guarantee of a specific ordering of the elements in the array. + :param max_results: int (optional) + Maximum number of metastores to return. - when set to a value greater than 0, the page length is the + minimum of this value and a server configured value; - when set to 0, the page length is set to a + server configured value (recommended); - when set to a value less than 0, an invalid parameter error + is returned; - If not set, all the metastores are returned (not recommended). - Note: The number of + returned metastores might be less than the specified max_results size, even zero. The only + definitive indication that no further metastores can be fetched is when the next_page_token is unset + from the response. + :param page_token: str (optional) + Opaque pagination token to go to next page based on previous query. 
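`metastores.list()` gets the same treatment: it accepts `max_results`/`page_token` and, as the pagination loop in the following hunk shows, pages through results on the caller's behalf. A minimal sketch, assuming the workspace-level `w.metastores` accessor:

```python
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()

# list() is now a generator that follows next_page_token automatically;
# max_results bounds the page size of each underlying request.
for metastore in w.metastores.list(max_results=100):
    print(metastore.metastore_id, metastore.name, metastore.region)
```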
+ :returns: Iterator over :class:`MetastoreInfo` """ + query = {} + if max_results is not None: + query["max_results"] = max_results + if page_token is not None: + query["page_token"] = page_token headers = { "Accept": "application/json", } - json = self._api.do("GET", "/api/2.1/unity-catalog/metastores", headers=headers) - parsed = ListMetastoresResponse.from_dict(json).metastores - return parsed if parsed is not None else [] + while True: + json = self._api.do("GET", "/api/2.1/unity-catalog/metastores", query=query, headers=headers) + if "metastores" in json: + for v in json["metastores"]: + yield MetastoreInfo.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] def summary(self) -> GetMetastoreSummaryResponse: """Get a metastore summary. @@ -13088,7 +12517,7 @@ def update( *, delta_sharing_organization_name: Optional[str] = None, delta_sharing_recipient_token_lifetime_in_seconds: Optional[int] = None, - delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope] = None, + delta_sharing_scope: Optional[DeltaSharingScopeEnum] = None, new_name: Optional[str] = None, owner: Optional[str] = None, privilege_model_version: Optional[str] = None, @@ -13106,7 +12535,7 @@ def update( Sharing as the official name. :param delta_sharing_recipient_token_lifetime_in_seconds: int (optional) The lifetime of delta sharing recipient token in seconds. - :param delta_sharing_scope: :class:`UpdateMetastoreDeltaSharingScope` (optional) + :param delta_sharing_scope: :class:`DeltaSharingScopeEnum` (optional) The scope of Delta Sharing enabled for the metastore. :param new_name: str (optional) New name for the metastore. @@ -13157,7 +12586,7 @@ def update_assignment( :param workspace_id: int A workspace ID. :param default_catalog_name: str (optional) - The name of the default catalog in the metastore. This field is depracted. Please use "Default + The name of the default catalog in the metastore. This field is deprecated. Please use "Default Namespace API" to configure the default catalog for a Databricks workspace. :param metastore_id: str (optional) The unique ID of the metastore. @@ -14409,8 +13838,6 @@ def list( "Accept": "application/json", } - if "max_results" not in query: - query["max_results"] = 0 while True: json = self._api.do("GET", "/api/2.1/unity-catalog/schemas", query=query, headers=headers) if "schemas" in json: @@ -14442,6 +13869,7 @@ def update( :param comment: str (optional) User-provided free-form text description. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) + Whether predictive optimization should be enabled for this object and objects under it. :param new_name: str (optional) New name for the schema. :param owner: str (optional) diff --git a/databricks/sdk/service/cleanrooms.py b/databricks/sdk/service/cleanrooms.py index edf1cd253..490f7711e 100755 --- a/databricks/sdk/service/cleanrooms.py +++ b/databricks/sdk/service/cleanrooms.py @@ -138,6 +138,10 @@ class CleanRoomAsset: asset_type: Optional[CleanRoomAssetAssetType] = None """The type of the asset.""" + clean_room_name: Optional[str] = None + """The name of the clean room this asset belongs to. This is an output-only field to ensure proper + resource identification.""" + foreign_table: Optional[CleanRoomAssetForeignTable] = None """Foreign table details available to all collaborators of the clean room. 
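# Illustrative only: MetastoresAPI.list now pages through results server-side and yields
# MetastoreInfo objects lazily, so existing callers can keep iterating unchanged.
# Assumes a configured WorkspaceClient with permission to list metastores.
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
for metastore in w.metastores.list(max_results=50):
    print(metastore.metastore_id, metastore.name)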
Present if and only if **asset_type** is **FOREIGN_TABLE**""" @@ -192,6 +196,8 @@ def as_dict(self) -> dict: body["added_at"] = self.added_at if self.asset_type is not None: body["asset_type"] = self.asset_type.value + if self.clean_room_name is not None: + body["clean_room_name"] = self.clean_room_name if self.foreign_table: body["foreign_table"] = self.foreign_table.as_dict() if self.foreign_table_local_details: @@ -223,6 +229,8 @@ def as_shallow_dict(self) -> dict: body["added_at"] = self.added_at if self.asset_type is not None: body["asset_type"] = self.asset_type + if self.clean_room_name is not None: + body["clean_room_name"] = self.clean_room_name if self.foreign_table: body["foreign_table"] = self.foreign_table if self.foreign_table_local_details: @@ -253,6 +261,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CleanRoomAsset: return cls( added_at=d.get("added_at", None), asset_type=_enum(d, "asset_type", CleanRoomAssetAssetType), + clean_room_name=d.get("clean_room_name", None), foreign_table=_from_dict(d, "foreign_table", CleanRoomAssetForeignTable), foreign_table_local_details=_from_dict( d, "foreign_table_local_details", CleanRoomAssetForeignTableLocalDetails @@ -1236,7 +1245,7 @@ def create(self, clean_room_name: str, asset: CleanRoomAsset) -> CleanRoomAsset: res = self._api.do("POST", f"/api/2.0/clean-rooms/{clean_room_name}/assets", body=body, headers=headers) return CleanRoomAsset.from_dict(res) - def delete(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, asset_full_name: str): + def delete(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str): """Delete an asset. Delete a clean room asset - unshare/remove the asset from the clean room @@ -1245,7 +1254,7 @@ def delete(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, asse Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` The type of the asset. - :param asset_full_name: str + :param name: str The fully qualified name of the asset, it is same as the name field in CleanRoomAsset. @@ -1256,12 +1265,10 @@ def delete(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, asse } self._api.do( - "DELETE", - f"/api/2.0/clean-rooms/{clean_room_name}/assets/{asset_type.value}/{asset_full_name}", - headers=headers, + "DELETE", f"/api/2.0/clean-rooms/{clean_room_name}/assets/{asset_type.value}/{name}", headers=headers ) - def get(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, asset_full_name: str) -> CleanRoomAsset: + def get(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str) -> CleanRoomAsset: """Get an asset. Get the details of a clean room asset by its type and full name. @@ -1270,7 +1277,7 @@ def get(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, asset_f Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` The type of the asset. - :param asset_full_name: str + :param name: str The fully qualified name of the asset, it is same as the name field in CleanRoomAsset. 
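# Illustrative only: the asset delete/get calls now take the parameter `name` (previously
# `asset_full_name`). The clean room and asset names below are hypothetical; assumes a
# configured WorkspaceClient.
from databricks.sdk import WorkspaceClient
from databricks.sdk.service import cleanrooms

w = WorkspaceClient()
asset = w.clean_room_assets.get(
    clean_room_name="my-clean-room",
    asset_type=cleanrooms.CleanRoomAssetAssetType.FOREIGN_TABLE,
    name="remote_catalog.schema.shared_table",
)
print(asset.clean_room_name, asset.name)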
:returns: :class:`CleanRoomAsset` @@ -1281,9 +1288,7 @@ def get(self, clean_room_name: str, asset_type: CleanRoomAssetAssetType, asset_f } res = self._api.do( - "GET", - f"/api/2.0/clean-rooms/{clean_room_name}/assets/{asset_type.value}/{asset_full_name}", - headers=headers, + "GET", f"/api/2.0/clean-rooms/{clean_room_name}/assets/{asset_type.value}/{name}", headers=headers ) return CleanRoomAsset.from_dict(res) diff --git a/databricks/sdk/service/compute.py b/databricks/sdk/service/compute.py index aa35234aa..46d940a04 100755 --- a/databricks/sdk/service/compute.py +++ b/databricks/sdk/service/compute.py @@ -3528,16 +3528,10 @@ class CustomPolicyTag: key: str """The key of the tag. - Must be unique among all custom tags of the same policy - Cannot be “budget-policy-name”, “budget-policy-id” or "budget-policy-resolution-result" - these - tags are preserved. - - - Follows the regex pattern defined in cluster-common/conf/src/ClusterTagConstraints.scala - (https://src.dev.databricks.com/databricks/universe@1647196627c8dc7b4152ad098a94b86484b93a6c/-/blob/cluster-common/conf/src/ClusterTagConstraints.scala?L17)""" + tags are preserved.""" value: Optional[str] = None - """The value of the tag. - - - Follows the regex pattern defined in cluster-common/conf/src/ClusterTagConstraints.scala - (https://src.dev.databricks.com/databricks/universe@1647196627c8dc7b4152ad098a94b86484b93a6c/-/blob/cluster-common/conf/src/ClusterTagConstraints.scala?L24)""" + """The value of the tag.""" def as_dict(self) -> dict: """Serializes the CustomPolicyTag into a dictionary suitable for use as a JSON request body.""" @@ -4781,25 +4775,19 @@ class Environment: non-notebook task, and DLT's environment for classic and serverless pipelines. In this minimal environment spec, only pip dependencies are supported.""" - client: str - """Client version used by the environment The client is the user-facing environment of the runtime. - Each client comes with a specific set of pre-installed libraries. The version is a string, - consisting of the major client version.""" + client: Optional[str] = None + """Use `environment_version` instead.""" dependencies: Optional[List[str]] = None """List of pip dependencies, as supported by the version of pip in this environment. Each - dependency is a pip requirement file line - https://pip.pypa.io/en/stable/reference/requirements-file-format/ Allowed dependency could be - , , (WSFS or Volumes in - Databricks), E.g. dependencies: ["foo==0.0.1", "-r - /Workspace/test/requirements.txt"]""" + dependency is a valid pip requirements file line per + https://pip.pypa.io/en/stable/reference/requirements-file-format/. Allowed dependencies include + a requirement specifier, an archive URL, a local project path (such as WSFS or UC Volumes in + Databricks), or a VCS project URL.""" environment_version: Optional[str] = None - """We renamed `client` to `environment_version` in notebook exports. This field is meant solely so - that imported notebooks with `environment_version` can be deserialized correctly, in a - backwards-compatible way (i.e. if `client` is specified instead of `environment_version`, it - will be deserialized correctly). Do NOT use this field for any other purpose, e.g. notebook - storage. This field is not yet exposed to customers (e.g. in the jobs API).""" + """Required. Environment version used by the environment. Each version comes with a specific Python + version and a set of Python packages. 
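# Illustrative only: with this change `client` becomes optional and the preferred field is
# `environment_version` (a string containing an integer). The version and dependency pins
# below are hypothetical placeholders.
from databricks.sdk.service import compute

env = compute.Environment(
    environment_version="2",
    dependencies=["requests==2.31.0", "-r /Workspace/Users/someone@example.com/requirements.txt"],
)
print(env.as_dict())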
The version is a string, consisting of an integer.""" jar_dependencies: Optional[List[str]] = None """List of jar dependencies, should be string representing volume paths. For example: @@ -5038,6 +5026,7 @@ class EventType(Enum): AUTOSCALING_BACKOFF = "AUTOSCALING_BACKOFF" AUTOSCALING_FAILED = "AUTOSCALING_FAILED" AUTOSCALING_STATS_REPORT = "AUTOSCALING_STATS_REPORT" + CLUSTER_MIGRATED = "CLUSTER_MIGRATED" CREATING = "CREATING" DBFS_DOWN = "DBFS_DOWN" DID_NOT_EXPAND_DISK = "DID_NOT_EXPAND_DISK" @@ -9082,6 +9071,7 @@ class TerminationReasonCode(Enum): DRIVER_OUT_OF_MEMORY = "DRIVER_OUT_OF_MEMORY" DRIVER_POD_CREATION_FAILURE = "DRIVER_POD_CREATION_FAILURE" DRIVER_UNEXPECTED_FAILURE = "DRIVER_UNEXPECTED_FAILURE" + DRIVER_UNHEALTHY = "DRIVER_UNHEALTHY" DRIVER_UNREACHABLE = "DRIVER_UNREACHABLE" DRIVER_UNRESPONSIVE = "DRIVER_UNRESPONSIVE" DYNAMIC_SPARK_CONF_SIZE_EXCEEDED = "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED" diff --git a/databricks/sdk/service/dashboards.py b/databricks/sdk/service/dashboards.py index 6a3945727..eb0bfbf16 100755 --- a/databricks/sdk/service/dashboards.py +++ b/databricks/sdk/service/dashboards.py @@ -102,76 +102,6 @@ def from_dict(cls, d: Dict[str, Any]) -> AuthorizationDetailsGrantRule: return cls(permission_set=d.get("permission_set", None)) -@dataclass -class CancelQueryExecutionResponse: - status: Optional[List[CancelQueryExecutionResponseStatus]] = None - - def as_dict(self) -> dict: - """Serializes the CancelQueryExecutionResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.status: - body["status"] = [v.as_dict() for v in self.status] - return body - - def as_shallow_dict(self) -> dict: - """Serializes the CancelQueryExecutionResponse into a shallow dictionary of its immediate attributes.""" - body = {} - if self.status: - body["status"] = self.status - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CancelQueryExecutionResponse: - """Deserializes the CancelQueryExecutionResponse from a dictionary.""" - return cls(status=_repeated_dict(d, "status", CancelQueryExecutionResponseStatus)) - - -@dataclass -class CancelQueryExecutionResponseStatus: - data_token: str - """The token to poll for result asynchronously Example: - EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ""" - - pending: Optional[Empty] = None - """Represents an empty message, similar to google.protobuf.Empty, which is not available in the - firm right now.""" - - success: Optional[Empty] = None - """Represents an empty message, similar to google.protobuf.Empty, which is not available in the - firm right now.""" - - def as_dict(self) -> dict: - """Serializes the CancelQueryExecutionResponseStatus into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.data_token is not None: - body["data_token"] = self.data_token - if self.pending: - body["pending"] = self.pending.as_dict() - if self.success: - body["success"] = self.success.as_dict() - return body - - def as_shallow_dict(self) -> dict: - """Serializes the CancelQueryExecutionResponseStatus into a shallow dictionary of its immediate attributes.""" - body = {} - if self.data_token is not None: - body["data_token"] = self.data_token - if self.pending: - body["pending"] = self.pending - if self.success: - body["success"] = self.success - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> CancelQueryExecutionResponseStatus: - """Deserializes the CancelQueryExecutionResponseStatus from a dictionary.""" - return cls( - 
data_token=d.get("data_token", None), - pending=_from_dict(d, "pending", Empty), - success=_from_dict(d, "success", Empty), - ) - - @dataclass class CronSchedule: quartz_cron_expression: str @@ -359,94 +289,6 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteSubscriptionResponse: return cls() -@dataclass -class Empty: - """Represents an empty message, similar to google.protobuf.Empty, which is not available in the - firm right now.""" - - def as_dict(self) -> dict: - """Serializes the Empty into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the Empty into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> Empty: - """Deserializes the Empty from a dictionary.""" - return cls() - - -@dataclass -class ExecutePublishedDashboardQueryRequest: - """Execute query request for published Dashboards. Since published dashboards have the option of - running as the publisher, the datasets, warehouse_id are excluded from the request and instead - read from the source (lakeview-config) via the additional parameters (dashboardName and - dashboardRevisionId)""" - - dashboard_name: str - """Dashboard name and revision_id is required to retrieve PublishedDatasetDataModel which contains - the list of datasets, warehouse_id, and embedded_credentials""" - - dashboard_revision_id: str - - override_warehouse_id: Optional[str] = None - """A dashboard schedule can override the warehouse used as compute for processing the published - dashboard queries""" - - def as_dict(self) -> dict: - """Serializes the ExecutePublishedDashboardQueryRequest into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.dashboard_name is not None: - body["dashboard_name"] = self.dashboard_name - if self.dashboard_revision_id is not None: - body["dashboard_revision_id"] = self.dashboard_revision_id - if self.override_warehouse_id is not None: - body["override_warehouse_id"] = self.override_warehouse_id - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ExecutePublishedDashboardQueryRequest into a shallow dictionary of its immediate attributes.""" - body = {} - if self.dashboard_name is not None: - body["dashboard_name"] = self.dashboard_name - if self.dashboard_revision_id is not None: - body["dashboard_revision_id"] = self.dashboard_revision_id - if self.override_warehouse_id is not None: - body["override_warehouse_id"] = self.override_warehouse_id - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ExecutePublishedDashboardQueryRequest: - """Deserializes the ExecutePublishedDashboardQueryRequest from a dictionary.""" - return cls( - dashboard_name=d.get("dashboard_name", None), - dashboard_revision_id=d.get("dashboard_revision_id", None), - override_warehouse_id=d.get("override_warehouse_id", None), - ) - - -@dataclass -class ExecuteQueryResponse: - def as_dict(self) -> dict: - """Serializes the ExecuteQueryResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ExecuteQueryResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ExecuteQueryResponse: - """Deserializes the ExecuteQueryResponse from a dictionary.""" - return cls() - - @dataclass class GenieAttachment: """Genie AI Response""" @@ -687,6 
+529,38 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieGetMessageQueryResultResponse: return cls(statement_response=_from_dict(d, "statement_response", sql.StatementResponse)) +@dataclass +class GenieListSpacesResponse: + next_page_token: Optional[str] = None + """Token to get the next page of results""" + + spaces: Optional[List[GenieSpace]] = None + """List of Genie spaces""" + + def as_dict(self) -> dict: + """Serializes the GenieListSpacesResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.spaces: + body["spaces"] = [v.as_dict() for v in self.spaces] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GenieListSpacesResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.spaces: + body["spaces"] = self.spaces + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GenieListSpacesResponse: + """Deserializes the GenieListSpacesResponse from a dictionary.""" + return cls(next_page_token=d.get("next_page_token", None), spaces=_repeated_dict(d, "spaces", GenieSpace)) + + @dataclass class GenieMessage: id: str @@ -1043,24 +917,6 @@ def from_dict(cls, d: Dict[str, Any]) -> GenieStartConversationResponse: ) -@dataclass -class GetPublishedDashboardEmbeddedResponse: - def as_dict(self) -> dict: - """Serializes the GetPublishedDashboardEmbeddedResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - return body - - def as_shallow_dict(self) -> dict: - """Serializes the GetPublishedDashboardEmbeddedResponse into a shallow dictionary of its immediate attributes.""" - body = {} - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> GetPublishedDashboardEmbeddedResponse: - """Deserializes the GetPublishedDashboardEmbeddedResponse from a dictionary.""" - return cls() - - @dataclass class GetPublishedDashboardTokenInfoResponse: authorization_details: Optional[List[AuthorizationDetails]] = None @@ -1381,80 +1237,6 @@ def from_dict(cls, d: Dict[str, Any]) -> MigrateDashboardRequest: ) -@dataclass -class PendingStatus: - data_token: str - """The token to poll for result asynchronously Example: - EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ""" - - def as_dict(self) -> dict: - """Serializes the PendingStatus into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.data_token is not None: - body["data_token"] = self.data_token - return body - - def as_shallow_dict(self) -> dict: - """Serializes the PendingStatus into a shallow dictionary of its immediate attributes.""" - body = {} - if self.data_token is not None: - body["data_token"] = self.data_token - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> PendingStatus: - """Deserializes the PendingStatus from a dictionary.""" - return cls(data_token=d.get("data_token", None)) - - -@dataclass -class PollQueryStatusResponse: - data: Optional[List[PollQueryStatusResponseData]] = None - - def as_dict(self) -> dict: - """Serializes the PollQueryStatusResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.data: - body["data"] = [v.as_dict() for v in self.data] - return body - - def as_shallow_dict(self) -> dict: - """Serializes the PollQueryStatusResponse into a shallow dictionary of its immediate attributes.""" - 
body = {} - if self.data: - body["data"] = self.data - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> PollQueryStatusResponse: - """Deserializes the PollQueryStatusResponse from a dictionary.""" - return cls(data=_repeated_dict(d, "data", PollQueryStatusResponseData)) - - -@dataclass -class PollQueryStatusResponseData: - status: QueryResponseStatus - - def as_dict(self) -> dict: - """Serializes the PollQueryStatusResponseData into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.status: - body["status"] = self.status.as_dict() - return body - - def as_shallow_dict(self) -> dict: - """Serializes the PollQueryStatusResponseData into a shallow dictionary of its immediate attributes.""" - body = {} - if self.status: - body["status"] = self.status - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> PollQueryStatusResponseData: - """Deserializes the PollQueryStatusResponseData from a dictionary.""" - return cls(status=_from_dict(d, "status", QueryResponseStatus)) - - @dataclass class PublishRequest: dashboard_id: Optional[str] = None @@ -1550,67 +1332,6 @@ def from_dict(cls, d: Dict[str, Any]) -> PublishedDashboard: ) -@dataclass -class QueryResponseStatus: - canceled: Optional[Empty] = None - """Represents an empty message, similar to google.protobuf.Empty, which is not available in the - firm right now.""" - - closed: Optional[Empty] = None - """Represents an empty message, similar to google.protobuf.Empty, which is not available in the - firm right now.""" - - pending: Optional[PendingStatus] = None - - statement_id: Optional[str] = None - """The statement id in format(01eef5da-c56e-1f36-bafa-21906587d6ba) The statement_id should be - identical to data_token in SuccessStatus and PendingStatus. 
This field is created for audit - logging purpose to record the statement_id of all QueryResponseStatus.""" - - success: Optional[SuccessStatus] = None - - def as_dict(self) -> dict: - """Serializes the QueryResponseStatus into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.canceled: - body["canceled"] = self.canceled.as_dict() - if self.closed: - body["closed"] = self.closed.as_dict() - if self.pending: - body["pending"] = self.pending.as_dict() - if self.statement_id is not None: - body["statement_id"] = self.statement_id - if self.success: - body["success"] = self.success.as_dict() - return body - - def as_shallow_dict(self) -> dict: - """Serializes the QueryResponseStatus into a shallow dictionary of its immediate attributes.""" - body = {} - if self.canceled: - body["canceled"] = self.canceled - if self.closed: - body["closed"] = self.closed - if self.pending: - body["pending"] = self.pending - if self.statement_id is not None: - body["statement_id"] = self.statement_id - if self.success: - body["success"] = self.success - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> QueryResponseStatus: - """Deserializes the QueryResponseStatus from a dictionary.""" - return cls( - canceled=_from_dict(d, "canceled", Empty), - closed=_from_dict(d, "closed", Empty), - pending=_from_dict(d, "pending", PendingStatus), - statement_id=d.get("statement_id", None), - success=_from_dict(d, "success", SuccessStatus), - ) - - @dataclass class Result: is_truncated: Optional[bool] = None @@ -1926,39 +1647,6 @@ def from_dict(cls, d: Dict[str, Any]) -> SubscriptionSubscriberUser: return cls(user_id=d.get("user_id", None)) -@dataclass -class SuccessStatus: - data_token: str - """The token to poll for result asynchronously Example: - EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ""" - - truncated: Optional[bool] = None - """Whether the query result is truncated (either by byte limit or row limit)""" - - def as_dict(self) -> dict: - """Serializes the SuccessStatus into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.data_token is not None: - body["data_token"] = self.data_token - if self.truncated is not None: - body["truncated"] = self.truncated - return body - - def as_shallow_dict(self) -> dict: - """Serializes the SuccessStatus into a shallow dictionary of its immediate attributes.""" - body = {} - if self.data_token is not None: - body["data_token"] = self.data_token - if self.truncated is not None: - body["truncated"] = self.truncated - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> SuccessStatus: - """Deserializes the SuccessStatus from a dictionary.""" - return cls(data_token=d.get("data_token", None), truncated=d.get("truncated", None)) - - @dataclass class TextAttachment: content: Optional[str] = None @@ -2382,6 +2070,33 @@ def get_space(self, space_id: str) -> GenieSpace: res = self._api.do("GET", f"/api/2.0/genie/spaces/{space_id}", headers=headers) return GenieSpace.from_dict(res) + def list_spaces( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> GenieListSpacesResponse: + """List Genie spaces. + + Get list of Genie Spaces. 
+ + :param page_size: int (optional) + Maximum number of spaces to return per page + :param page_token: str (optional) + Pagination token for getting the next page of results + + :returns: :class:`GenieListSpacesResponse` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/genie/spaces", query=query, headers=headers) + return GenieListSpacesResponse.from_dict(res) + def start_conversation(self, space_id: str, content: str) -> Wait[GenieMessage]: """Start conversation. @@ -2904,23 +2619,6 @@ class LakeviewEmbeddedAPI: def __init__(self, api_client): self._api = api_client - def get_published_dashboard_embedded(self, dashboard_id: str): - """Read a published dashboard in an embedded ui. - - Get the current published dashboard within an embedded context. - - :param dashboard_id: str - UUID identifying the published dashboard. - - - """ - - headers = { - "Accept": "application/json", - } - - self._api.do("GET", f"/api/2.0/lakeview/dashboards/{dashboard_id}/published/embedded", headers=headers) - def get_published_dashboard_token_info( self, dashboard_id: str, *, external_value: Optional[str] = None, external_viewer_id: Optional[str] = None ) -> GetPublishedDashboardTokenInfoResponse: @@ -2956,93 +2654,3 @@ def get_published_dashboard_token_info( "GET", f"/api/2.0/lakeview/dashboards/{dashboard_id}/published/tokeninfo", query=query, headers=headers ) return GetPublishedDashboardTokenInfoResponse.from_dict(res) - - -class QueryExecutionAPI: - """Query execution APIs for AI / BI Dashboards""" - - def __init__(self, api_client): - self._api = api_client - - def cancel_published_query_execution( - self, dashboard_name: str, dashboard_revision_id: str, *, tokens: Optional[List[str]] = None - ) -> CancelQueryExecutionResponse: - """Cancel the results for the a query for a published, embedded dashboard. - - :param dashboard_name: str - :param dashboard_revision_id: str - :param tokens: List[str] (optional) - Example: EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ - - :returns: :class:`CancelQueryExecutionResponse` - """ - - query = {} - if dashboard_name is not None: - query["dashboard_name"] = dashboard_name - if dashboard_revision_id is not None: - query["dashboard_revision_id"] = dashboard_revision_id - if tokens is not None: - query["tokens"] = [v for v in tokens] - headers = { - "Accept": "application/json", - } - - res = self._api.do("DELETE", "/api/2.0/lakeview-query/query/published", query=query, headers=headers) - return CancelQueryExecutionResponse.from_dict(res) - - def execute_published_dashboard_query( - self, dashboard_name: str, dashboard_revision_id: str, *, override_warehouse_id: Optional[str] = None - ): - """Execute a query for a published dashboard. 
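# Illustrative only: sketch of the new Genie list_spaces() call added above, paging with
# page_size/page_token. Assumes a configured WorkspaceClient.
from databricks.sdk import WorkspaceClient

w = WorkspaceClient()
resp = w.genie.list_spaces(page_size=25)
for space in resp.spaces or []:
    print(space.space_id, space.title)
if resp.next_page_token:
    resp = w.genie.list_spaces(page_size=25, page_token=resp.next_page_token)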
- - :param dashboard_name: str - Dashboard name and revision_id is required to retrieve PublishedDatasetDataModel which contains the - list of datasets, warehouse_id, and embedded_credentials - :param dashboard_revision_id: str - :param override_warehouse_id: str (optional) - A dashboard schedule can override the warehouse used as compute for processing the published - dashboard queries - - - """ - body = {} - if dashboard_name is not None: - body["dashboard_name"] = dashboard_name - if dashboard_revision_id is not None: - body["dashboard_revision_id"] = dashboard_revision_id - if override_warehouse_id is not None: - body["override_warehouse_id"] = override_warehouse_id - headers = { - "Accept": "application/json", - "Content-Type": "application/json", - } - - self._api.do("POST", "/api/2.0/lakeview-query/query/published", body=body, headers=headers) - - def poll_published_query_status( - self, dashboard_name: str, dashboard_revision_id: str, *, tokens: Optional[List[str]] = None - ) -> PollQueryStatusResponse: - """Poll the results for the a query for a published, embedded dashboard. - - :param dashboard_name: str - :param dashboard_revision_id: str - :param tokens: List[str] (optional) - Example: EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ - - :returns: :class:`PollQueryStatusResponse` - """ - - query = {} - if dashboard_name is not None: - query["dashboard_name"] = dashboard_name - if dashboard_revision_id is not None: - query["dashboard_revision_id"] = dashboard_revision_id - if tokens is not None: - query["tokens"] = [v for v in tokens] - headers = { - "Accept": "application/json", - } - - res = self._api.do("GET", "/api/2.0/lakeview-query/query/published", query=query, headers=headers) - return PollQueryStatusResponse.from_dict(res) diff --git a/databricks/sdk/service/database.py b/databricks/sdk/service/database.py new file mode 100755 index 000000000..c9a9c0ced --- /dev/null +++ b/databricks/sdk/service/database.py @@ -0,0 +1,1256 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
+ +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, Iterator, List, Optional + +from ._internal import _enum, _from_dict, _repeated_dict + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class DatabaseCatalog: + name: str + """The name of the catalog in UC.""" + + database_instance_name: str + """The name of the DatabaseInstance housing the database.""" + + database_name: str + """The name of the database (in a instance) associated with the catalog.""" + + create_database_if_not_exists: Optional[bool] = None + + uid: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the DatabaseCatalog into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.create_database_if_not_exists is not None: + body["create_database_if_not_exists"] = self.create_database_if_not_exists + if self.database_instance_name is not None: + body["database_instance_name"] = self.database_instance_name + if self.database_name is not None: + body["database_name"] = self.database_name + if self.name is not None: + body["name"] = self.name + if self.uid is not None: + body["uid"] = self.uid + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseCatalog into a shallow dictionary of its immediate attributes.""" + body = {} + if self.create_database_if_not_exists is not None: + body["create_database_if_not_exists"] = self.create_database_if_not_exists + if self.database_instance_name is not None: + body["database_instance_name"] = self.database_instance_name + if self.database_name is not None: + body["database_name"] = self.database_name + if self.name is not None: + body["name"] = self.name + if self.uid is not None: + body["uid"] = self.uid + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseCatalog: + """Deserializes the DatabaseCatalog from a dictionary.""" + return cls( + create_database_if_not_exists=d.get("create_database_if_not_exists", None), + database_instance_name=d.get("database_instance_name", None), + database_name=d.get("database_name", None), + name=d.get("name", None), + uid=d.get("uid", None), + ) + + +@dataclass +class DatabaseCredential: + token: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the DatabaseCredential into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.token is not None: + body["token"] = self.token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseCredential into a shallow dictionary of its immediate attributes.""" + body = {} + if self.token is not None: + body["token"] = self.token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseCredential: + """Deserializes the DatabaseCredential from a dictionary.""" + return cls(token=d.get("token", None)) + + +@dataclass +class DatabaseInstance: + """A DatabaseInstance represents a logical Postgres instance, comprised of both compute and + storage.""" + + name: str + """The name of the instance. This is the unique identifier for the instance.""" + + capacity: Optional[str] = None + """The sku of the instance. 
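# Illustrative only: constructing the DatabaseCatalog payload defined above. The catalog,
# instance, and Postgres database names are hypothetical placeholders.
from databricks.sdk.service.database import DatabaseCatalog

pg_catalog = DatabaseCatalog(
    name="my_pg_catalog",
    database_instance_name="my-instance",
    database_name="databricks_postgres",
    create_database_if_not_exists=True,
)
print(pg_catalog.as_dict())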
Valid values are "CU_1", "CU_2", "CU_4", "CU_8".""" + + creation_time: Optional[str] = None + """The timestamp when the instance was created.""" + + creator: Optional[str] = None + """The email of the creator of the instance.""" + + pg_version: Optional[str] = None + """The version of Postgres running on the instance.""" + + read_write_dns: Optional[str] = None + """The DNS endpoint to connect to the instance for read+write access.""" + + state: Optional[DatabaseInstanceState] = None + """The current state of the instance.""" + + stopped: Optional[bool] = None + """Whether the instance is stopped.""" + + uid: Optional[str] = None + """An immutable UUID identifier for the instance.""" + + def as_dict(self) -> dict: + """Serializes the DatabaseInstance into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.capacity is not None: + body["capacity"] = self.capacity + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.creator is not None: + body["creator"] = self.creator + if self.name is not None: + body["name"] = self.name + if self.pg_version is not None: + body["pg_version"] = self.pg_version + if self.read_write_dns is not None: + body["read_write_dns"] = self.read_write_dns + if self.state is not None: + body["state"] = self.state.value + if self.stopped is not None: + body["stopped"] = self.stopped + if self.uid is not None: + body["uid"] = self.uid + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseInstance into a shallow dictionary of its immediate attributes.""" + body = {} + if self.capacity is not None: + body["capacity"] = self.capacity + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.creator is not None: + body["creator"] = self.creator + if self.name is not None: + body["name"] = self.name + if self.pg_version is not None: + body["pg_version"] = self.pg_version + if self.read_write_dns is not None: + body["read_write_dns"] = self.read_write_dns + if self.state is not None: + body["state"] = self.state + if self.stopped is not None: + body["stopped"] = self.stopped + if self.uid is not None: + body["uid"] = self.uid + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseInstance: + """Deserializes the DatabaseInstance from a dictionary.""" + return cls( + capacity=d.get("capacity", None), + creation_time=d.get("creation_time", None), + creator=d.get("creator", None), + name=d.get("name", None), + pg_version=d.get("pg_version", None), + read_write_dns=d.get("read_write_dns", None), + state=_enum(d, "state", DatabaseInstanceState), + stopped=d.get("stopped", None), + uid=d.get("uid", None), + ) + + +class DatabaseInstanceState(Enum): + + AVAILABLE = "AVAILABLE" + DELETING = "DELETING" + FAILING_OVER = "FAILING_OVER" + STARTING = "STARTING" + STOPPED = "STOPPED" + UPDATING = "UPDATING" + + +@dataclass +class DatabaseTable: + """Next field marker: 13""" + + name: str + """Full three-part (catalog, schema, table) name of the table.""" + + database_instance_name: Optional[str] = None + """Name of the target database instance. This is required when creating database tables in standard + catalogs. This is optional when creating database tables in registered catalogs. 
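# Illustrative only: a DatabaseInstance payload using the fields defined above. `name` is
# the unique identifier and `capacity` selects the SKU ("CU_1", "CU_2", "CU_4", "CU_8");
# the remaining fields are populated by the service. The instance name is hypothetical.
from databricks.sdk.service.database import DatabaseInstance

instance = DatabaseInstance(name="my-instance", capacity="CU_2")
print(instance.as_dict())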
If this field + is specified when creating database tables in registered catalogs, the database instance name + MUST match that of the registered catalog (or the request will be rejected).""" + + logical_database_name: Optional[str] = None + """Target Postgres database object (logical database) name for this table. This field is optional + in all scenarios. + + When creating a table in a registered Postgres catalog, the target Postgres database name is + inferred to be that of the registered catalog. If this field is specified in this scenario, the + Postgres database name MUST match that of the registered catalog (or the request will be + rejected). + + When creating a table in a standard catalog, the target database name is inferred to be that of + the standard catalog. In this scenario, specifying this field will allow targeting an arbitrary + postgres database. Note that this has implications for the `create_database_objects_is_missing` + field in `spec`.""" + + table_serving_url: Optional[str] = None + """Data serving REST API URL for this table""" + + def as_dict(self) -> dict: + """Serializes the DatabaseTable into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.database_instance_name is not None: + body["database_instance_name"] = self.database_instance_name + if self.logical_database_name is not None: + body["logical_database_name"] = self.logical_database_name + if self.name is not None: + body["name"] = self.name + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DatabaseTable into a shallow dictionary of its immediate attributes.""" + body = {} + if self.database_instance_name is not None: + body["database_instance_name"] = self.database_instance_name + if self.logical_database_name is not None: + body["logical_database_name"] = self.logical_database_name + if self.name is not None: + body["name"] = self.name + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DatabaseTable: + """Deserializes the DatabaseTable from a dictionary.""" + return cls( + database_instance_name=d.get("database_instance_name", None), + logical_database_name=d.get("logical_database_name", None), + name=d.get("name", None), + table_serving_url=d.get("table_serving_url", None), + ) + + +@dataclass +class DeleteDatabaseCatalogResponse: + def as_dict(self) -> dict: + """Serializes the DeleteDatabaseCatalogResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DeleteDatabaseCatalogResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DeleteDatabaseCatalogResponse: + """Deserializes the DeleteDatabaseCatalogResponse from a dictionary.""" + return cls() + + +@dataclass +class DeleteDatabaseInstanceResponse: + def as_dict(self) -> dict: + """Serializes the DeleteDatabaseInstanceResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DeleteDatabaseInstanceResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DeleteDatabaseInstanceResponse: + """Deserializes the 
DeleteDatabaseInstanceResponse from a dictionary.""" + return cls() + + +@dataclass +class DeleteDatabaseTableResponse: + def as_dict(self) -> dict: + """Serializes the DeleteDatabaseTableResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DeleteDatabaseTableResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DeleteDatabaseTableResponse: + """Deserializes the DeleteDatabaseTableResponse from a dictionary.""" + return cls() + + +@dataclass +class DeleteSyncedDatabaseTableResponse: + def as_dict(self) -> dict: + """Serializes the DeleteSyncedDatabaseTableResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DeleteSyncedDatabaseTableResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DeleteSyncedDatabaseTableResponse: + """Deserializes the DeleteSyncedDatabaseTableResponse from a dictionary.""" + return cls() + + +@dataclass +class GenerateDatabaseCredentialRequest: + """Generates a credential that can be used to access database instances""" + + instance_names: Optional[List[str]] = None + """Instances to which the token will be scoped.""" + + request_id: Optional[str] = None + + def as_dict(self) -> dict: + """Serializes the GenerateDatabaseCredentialRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.instance_names: + body["instance_names"] = [v for v in self.instance_names] + if self.request_id is not None: + body["request_id"] = self.request_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the GenerateDatabaseCredentialRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.instance_names: + body["instance_names"] = self.instance_names + if self.request_id is not None: + body["request_id"] = self.request_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> GenerateDatabaseCredentialRequest: + """Deserializes the GenerateDatabaseCredentialRequest from a dictionary.""" + return cls(instance_names=d.get("instance_names", None), request_id=d.get("request_id", None)) + + +@dataclass +class ListDatabaseInstancesResponse: + database_instances: Optional[List[DatabaseInstance]] = None + """List of instances.""" + + next_page_token: Optional[str] = None + """Pagination token to request the next page of instances.""" + + def as_dict(self) -> dict: + """Serializes the ListDatabaseInstancesResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.database_instances: + body["database_instances"] = [v.as_dict() for v in self.database_instances] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListDatabaseInstancesResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.database_instances: + body["database_instances"] = self.database_instances + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListDatabaseInstancesResponse: + """Deserializes the ListDatabaseInstancesResponse from a dictionary.""" + 
return cls( + database_instances=_repeated_dict(d, "database_instances", DatabaseInstance), + next_page_token=d.get("next_page_token", None), + ) + + +@dataclass +class NewPipelineSpec: + """Custom fields that user can set for pipeline while creating SyncedDatabaseTable. Note that other + fields of pipeline are still inferred by table def internally""" + + storage_catalog: Optional[str] = None + """UC catalog for the pipeline to store intermediate files (checkpoints, event logs etc). This + needs to be a standard catalog where the user has permissions to create Delta tables.""" + + storage_schema: Optional[str] = None + """UC schema for the pipeline to store intermediate files (checkpoints, event logs etc). This needs + to be in the standard catalog where the user has permissions to create Delta tables.""" + + def as_dict(self) -> dict: + """Serializes the NewPipelineSpec into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.storage_catalog is not None: + body["storage_catalog"] = self.storage_catalog + if self.storage_schema is not None: + body["storage_schema"] = self.storage_schema + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NewPipelineSpec into a shallow dictionary of its immediate attributes.""" + body = {} + if self.storage_catalog is not None: + body["storage_catalog"] = self.storage_catalog + if self.storage_schema is not None: + body["storage_schema"] = self.storage_schema + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NewPipelineSpec: + """Deserializes the NewPipelineSpec from a dictionary.""" + return cls(storage_catalog=d.get("storage_catalog", None), storage_schema=d.get("storage_schema", None)) + + +class ProvisioningInfoState(Enum): + + ACTIVE = "ACTIVE" + DEGRADED = "DEGRADED" + DELETING = "DELETING" + FAILED = "FAILED" + PROVISIONING = "PROVISIONING" + UPDATING = "UPDATING" + + +@dataclass +class SyncedDatabaseTable: + """Next field marker: 12""" + + name: str + """Full three-part (catalog, schema, table) name of the table.""" + + data_synchronization_status: Optional[SyncedTableStatus] = None + """Synced Table data synchronization status""" + + database_instance_name: Optional[str] = None + """Name of the target database instance. This is required when creating synced database tables in + standard catalogs. This is optional when creating synced database tables in registered catalogs. + If this field is specified when creating synced database tables in registered catalogs, the + database instance name MUST match that of the registered catalog (or the request will be + rejected).""" + + logical_database_name: Optional[str] = None + """Target Postgres database object (logical database) name for this table. This field is optional + in all scenarios. + + When creating a synced table in a registered Postgres catalog, the target Postgres database name + is inferred to be that of the registered catalog. If this field is specified in this scenario, + the Postgres database name MUST match that of the registered catalog (or the request will be + rejected). + + When creating a synced table in a standard catalog, the target database name is inferred to be + that of the standard catalog. 
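# Illustrative only: building the credential-generation request defined above. The service
# method that accepts it is not shown in this hunk, so only the payload is sketched; the
# instance name is hypothetical and request_id is a caller-chosen idempotency value.
import uuid

from databricks.sdk.service.database import GenerateDatabaseCredentialRequest

req = GenerateDatabaseCredentialRequest(
    instance_names=["my-instance"],
    request_id=str(uuid.uuid4()),
)
print(req.as_dict())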
In this scenario, specifying this field will allow targeting an + arbitrary postgres database.""" + + spec: Optional[SyncedTableSpec] = None + """Specification of a synced database table.""" + + table_serving_url: Optional[str] = None + """Data serving REST API URL for this table""" + + unity_catalog_provisioning_state: Optional[ProvisioningInfoState] = None + """The provisioning state of the synced table entity in Unity Catalog. This is distinct from the + state of the data synchronization pipeline (i.e. the table may be in "ACTIVE" but the pipeline + may be in "PROVISIONING" as it runs asynchronously).""" + + def as_dict(self) -> dict: + """Serializes the SyncedDatabaseTable into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.data_synchronization_status: + body["data_synchronization_status"] = self.data_synchronization_status.as_dict() + if self.database_instance_name is not None: + body["database_instance_name"] = self.database_instance_name + if self.logical_database_name is not None: + body["logical_database_name"] = self.logical_database_name + if self.name is not None: + body["name"] = self.name + if self.spec: + body["spec"] = self.spec.as_dict() + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url + if self.unity_catalog_provisioning_state is not None: + body["unity_catalog_provisioning_state"] = self.unity_catalog_provisioning_state.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedDatabaseTable into a shallow dictionary of its immediate attributes.""" + body = {} + if self.data_synchronization_status: + body["data_synchronization_status"] = self.data_synchronization_status + if self.database_instance_name is not None: + body["database_instance_name"] = self.database_instance_name + if self.logical_database_name is not None: + body["logical_database_name"] = self.logical_database_name + if self.name is not None: + body["name"] = self.name + if self.spec: + body["spec"] = self.spec + if self.table_serving_url is not None: + body["table_serving_url"] = self.table_serving_url + if self.unity_catalog_provisioning_state is not None: + body["unity_catalog_provisioning_state"] = self.unity_catalog_provisioning_state + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedDatabaseTable: + """Deserializes the SyncedDatabaseTable from a dictionary.""" + return cls( + data_synchronization_status=_from_dict(d, "data_synchronization_status", SyncedTableStatus), + database_instance_name=d.get("database_instance_name", None), + logical_database_name=d.get("logical_database_name", None), + name=d.get("name", None), + spec=_from_dict(d, "spec", SyncedTableSpec), + table_serving_url=d.get("table_serving_url", None), + unity_catalog_provisioning_state=_enum(d, "unity_catalog_provisioning_state", ProvisioningInfoState), + ) + + +@dataclass +class SyncedTableContinuousUpdateStatus: + """Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE + or the SYNCED_UPDATING_PIPELINE_RESOURCES state.""" + + initial_pipeline_sync_progress: Optional[SyncedTablePipelineProgress] = None + """Progress of the initial data synchronization.""" + + last_processed_commit_version: Optional[int] = None + """The last source table Delta version that was synced to the synced table. 
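# Illustrative only: assembling a SyncedDatabaseTable with a SyncedTableSpec and
# NewPipelineSpec (both defined further down in this file) for the managed sync pipeline.
# All catalog, schema, table, and instance names are hypothetical placeholders.
from databricks.sdk.service.database import (NewPipelineSpec, SyncedDatabaseTable,
                                              SyncedTableSchedulingPolicy, SyncedTableSpec)

synced = SyncedDatabaseTable(
    name="my_pg_catalog.public.orders_synced",
    database_instance_name="my-instance",
    spec=SyncedTableSpec(
        source_table_full_name="main.sales.orders",
        primary_key_columns=["order_id"],
        scheduling_policy=SyncedTableSchedulingPolicy.TRIGGERED,
        create_database_objects_if_missing=True,
        new_pipeline_spec=NewPipelineSpec(storage_catalog="main", storage_schema="pipeline_state"),
    ),
)
print(synced.as_dict())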
Note that this Delta + version may not be completely synced to the synced table yet.""" + + timestamp: Optional[str] = None + """The timestamp of the last time any data was synchronized from the source table to the synced + table.""" + + def as_dict(self) -> dict: + """Serializes the SyncedTableContinuousUpdateStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.initial_pipeline_sync_progress: + body["initial_pipeline_sync_progress"] = self.initial_pipeline_sync_progress.as_dict() + if self.last_processed_commit_version is not None: + body["last_processed_commit_version"] = self.last_processed_commit_version + if self.timestamp is not None: + body["timestamp"] = self.timestamp + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedTableContinuousUpdateStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.initial_pipeline_sync_progress: + body["initial_pipeline_sync_progress"] = self.initial_pipeline_sync_progress + if self.last_processed_commit_version is not None: + body["last_processed_commit_version"] = self.last_processed_commit_version + if self.timestamp is not None: + body["timestamp"] = self.timestamp + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedTableContinuousUpdateStatus: + """Deserializes the SyncedTableContinuousUpdateStatus from a dictionary.""" + return cls( + initial_pipeline_sync_progress=_from_dict(d, "initial_pipeline_sync_progress", SyncedTablePipelineProgress), + last_processed_commit_version=d.get("last_processed_commit_version", None), + timestamp=d.get("timestamp", None), + ) + + +@dataclass +class SyncedTableFailedStatus: + """Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the + SYNCED_PIPELINE_FAILED state.""" + + last_processed_commit_version: Optional[int] = None + """The last source table Delta version that was synced to the synced table. Note that this Delta + version may only be partially synced to the synced table. Only populated if the table is still + synced and available for serving.""" + + timestamp: Optional[str] = None + """The timestamp of the last time any data was synchronized from the source table to the synced + table. 
Only populated if the table is still synced and available for serving.""" + + def as_dict(self) -> dict: + """Serializes the SyncedTableFailedStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.last_processed_commit_version is not None: + body["last_processed_commit_version"] = self.last_processed_commit_version + if self.timestamp is not None: + body["timestamp"] = self.timestamp + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedTableFailedStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.last_processed_commit_version is not None: + body["last_processed_commit_version"] = self.last_processed_commit_version + if self.timestamp is not None: + body["timestamp"] = self.timestamp + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedTableFailedStatus: + """Deserializes the SyncedTableFailedStatus from a dictionary.""" + return cls( + last_processed_commit_version=d.get("last_processed_commit_version", None), + timestamp=d.get("timestamp", None), + ) + + +@dataclass +class SyncedTablePipelineProgress: + """Progress information of the Synced Table data synchronization pipeline.""" + + estimated_completion_time_seconds: Optional[float] = None + """The estimated time remaining to complete this update in seconds.""" + + latest_version_currently_processing: Optional[int] = None + """The source table Delta version that was last processed by the pipeline. The pipeline may not + have completely processed this version yet.""" + + sync_progress_completion: Optional[float] = None + """The completion ratio of this update. This is a number between 0 and 1.""" + + synced_row_count: Optional[int] = None + """The number of rows that have been synced in this update.""" + + total_row_count: Optional[int] = None + """The total number of rows that need to be synced in this update. 
This number may be an estimate.""" + + def as_dict(self) -> dict: + """Serializes the SyncedTablePipelineProgress into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.estimated_completion_time_seconds is not None: + body["estimated_completion_time_seconds"] = self.estimated_completion_time_seconds + if self.latest_version_currently_processing is not None: + body["latest_version_currently_processing"] = self.latest_version_currently_processing + if self.sync_progress_completion is not None: + body["sync_progress_completion"] = self.sync_progress_completion + if self.synced_row_count is not None: + body["synced_row_count"] = self.synced_row_count + if self.total_row_count is not None: + body["total_row_count"] = self.total_row_count + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedTablePipelineProgress into a shallow dictionary of its immediate attributes.""" + body = {} + if self.estimated_completion_time_seconds is not None: + body["estimated_completion_time_seconds"] = self.estimated_completion_time_seconds + if self.latest_version_currently_processing is not None: + body["latest_version_currently_processing"] = self.latest_version_currently_processing + if self.sync_progress_completion is not None: + body["sync_progress_completion"] = self.sync_progress_completion + if self.synced_row_count is not None: + body["synced_row_count"] = self.synced_row_count + if self.total_row_count is not None: + body["total_row_count"] = self.total_row_count + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedTablePipelineProgress: + """Deserializes the SyncedTablePipelineProgress from a dictionary.""" + return cls( + estimated_completion_time_seconds=d.get("estimated_completion_time_seconds", None), + latest_version_currently_processing=d.get("latest_version_currently_processing", None), + sync_progress_completion=d.get("sync_progress_completion", None), + synced_row_count=d.get("synced_row_count", None), + total_row_count=d.get("total_row_count", None), + ) + + +@dataclass +class SyncedTableProvisioningStatus: + """Detailed status of a synced table. Shown if the synced table is in the + PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.""" + + initial_pipeline_sync_progress: Optional[SyncedTablePipelineProgress] = None + """Details about initial data synchronization. 
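# Illustrative only: status objects such as SyncedTablePipelineProgress are read-only and
# are normally rehydrated from API responses via from_dict; the values below are made up.
from databricks.sdk.service.database import SyncedTablePipelineProgress

progress = SyncedTablePipelineProgress.from_dict(
    {"synced_row_count": 500, "total_row_count": 2000, "sync_progress_completion": 0.25}
)
print(f"{progress.sync_progress_completion:.0%} complete "
      f"({progress.synced_row_count}/{progress.total_row_count} rows)")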
Only populated when in the + PROVISIONING_INITIAL_SNAPSHOT state.""" + + def as_dict(self) -> dict: + """Serializes the SyncedTableProvisioningStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.initial_pipeline_sync_progress: + body["initial_pipeline_sync_progress"] = self.initial_pipeline_sync_progress.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedTableProvisioningStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.initial_pipeline_sync_progress: + body["initial_pipeline_sync_progress"] = self.initial_pipeline_sync_progress + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedTableProvisioningStatus: + """Deserializes the SyncedTableProvisioningStatus from a dictionary.""" + return cls( + initial_pipeline_sync_progress=_from_dict(d, "initial_pipeline_sync_progress", SyncedTablePipelineProgress) + ) + + +class SyncedTableSchedulingPolicy(Enum): + + CONTINUOUS = "CONTINUOUS" + SNAPSHOT = "SNAPSHOT" + TRIGGERED = "TRIGGERED" + + +@dataclass +class SyncedTableSpec: + """Specification of a synced database table.""" + + create_database_objects_if_missing: Optional[bool] = None + """If true, the synced table's logical database and schema resources in PG will be created if they + do not already exist.""" + + new_pipeline_spec: Optional[NewPipelineSpec] = None + """Spec of new pipeline. Should be empty if pipeline_id is set""" + + pipeline_id: Optional[str] = None + """ID of the associated pipeline. Should be empty if new_pipeline_spec is set""" + + primary_key_columns: Optional[List[str]] = None + """Primary Key columns to be used for data insert/update in the destination.""" + + scheduling_policy: Optional[SyncedTableSchedulingPolicy] = None + """Scheduling policy of the underlying pipeline.""" + + source_table_full_name: Optional[str] = None + """Three-part (catalog, schema, table) name of the source Delta table.""" + + timeseries_key: Optional[str] = None + """Time series key to deduplicate (tie-break) rows with the same primary key.""" + + def as_dict(self) -> dict: + """Serializes the SyncedTableSpec into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.create_database_objects_if_missing is not None: + body["create_database_objects_if_missing"] = self.create_database_objects_if_missing + if self.new_pipeline_spec: + body["new_pipeline_spec"] = self.new_pipeline_spec.as_dict() + if self.pipeline_id is not None: + body["pipeline_id"] = self.pipeline_id + if self.primary_key_columns: + body["primary_key_columns"] = [v for v in self.primary_key_columns] + if self.scheduling_policy is not None: + body["scheduling_policy"] = self.scheduling_policy.value + if self.source_table_full_name is not None: + body["source_table_full_name"] = self.source_table_full_name + if self.timeseries_key is not None: + body["timeseries_key"] = self.timeseries_key + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedTableSpec into a shallow dictionary of its immediate attributes.""" + body = {} + if self.create_database_objects_if_missing is not None: + body["create_database_objects_if_missing"] = self.create_database_objects_if_missing + if self.new_pipeline_spec: + body["new_pipeline_spec"] = self.new_pipeline_spec + if self.pipeline_id is not None: + body["pipeline_id"] = self.pipeline_id + if self.primary_key_columns: + body["primary_key_columns"] = self.primary_key_columns + if self.scheduling_policy is not None: + 
body["scheduling_policy"] = self.scheduling_policy + if self.source_table_full_name is not None: + body["source_table_full_name"] = self.source_table_full_name + if self.timeseries_key is not None: + body["timeseries_key"] = self.timeseries_key + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedTableSpec: + """Deserializes the SyncedTableSpec from a dictionary.""" + return cls( + create_database_objects_if_missing=d.get("create_database_objects_if_missing", None), + new_pipeline_spec=_from_dict(d, "new_pipeline_spec", NewPipelineSpec), + pipeline_id=d.get("pipeline_id", None), + primary_key_columns=d.get("primary_key_columns", None), + scheduling_policy=_enum(d, "scheduling_policy", SyncedTableSchedulingPolicy), + source_table_full_name=d.get("source_table_full_name", None), + timeseries_key=d.get("timeseries_key", None), + ) + + +class SyncedTableState(Enum): + """The state of a synced table.""" + + SYNCED_TABLED_OFFLINE = "SYNCED_TABLED_OFFLINE" + SYNCED_TABLE_OFFLINE_FAILED = "SYNCED_TABLE_OFFLINE_FAILED" + SYNCED_TABLE_ONLINE = "SYNCED_TABLE_ONLINE" + SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE = "SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE" + SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE = "SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE" + SYNCED_TABLE_ONLINE_PIPELINE_FAILED = "SYNCED_TABLE_ONLINE_PIPELINE_FAILED" + SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE = "SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE" + SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES = "SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES" + SYNCED_TABLE_PROVISIONING = "SYNCED_TABLE_PROVISIONING" + SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT = "SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT" + SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES = "SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES" + + +@dataclass +class SyncedTableStatus: + """Status of a synced table.""" + + continuous_update_status: Optional[SyncedTableContinuousUpdateStatus] = None + """Detailed status of a synced table. Shown if the synced table is in the SYNCED_CONTINUOUS_UPDATE + or the SYNCED_UPDATING_PIPELINE_RESOURCES state.""" + + detailed_state: Optional[SyncedTableState] = None + """The state of the synced table.""" + + failed_status: Optional[SyncedTableFailedStatus] = None + """Detailed status of a synced table. Shown if the synced table is in the OFFLINE_FAILED or the + SYNCED_PIPELINE_FAILED state.""" + + message: Optional[str] = None + """A text description of the current state of the synced table.""" + + provisioning_status: Optional[SyncedTableProvisioningStatus] = None + """Detailed status of a synced table. Shown if the synced table is in the + PROVISIONING_PIPELINE_RESOURCES or the PROVISIONING_INITIAL_SNAPSHOT state.""" + + triggered_update_status: Optional[SyncedTableTriggeredUpdateStatus] = None + """Detailed status of a synced table. 
Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE + or the SYNCED_NO_PENDING_UPDATE state.""" + + def as_dict(self) -> dict: + """Serializes the SyncedTableStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.continuous_update_status: + body["continuous_update_status"] = self.continuous_update_status.as_dict() + if self.detailed_state is not None: + body["detailed_state"] = self.detailed_state.value + if self.failed_status: + body["failed_status"] = self.failed_status.as_dict() + if self.message is not None: + body["message"] = self.message + if self.provisioning_status: + body["provisioning_status"] = self.provisioning_status.as_dict() + if self.triggered_update_status: + body["triggered_update_status"] = self.triggered_update_status.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedTableStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.continuous_update_status: + body["continuous_update_status"] = self.continuous_update_status + if self.detailed_state is not None: + body["detailed_state"] = self.detailed_state + if self.failed_status: + body["failed_status"] = self.failed_status + if self.message is not None: + body["message"] = self.message + if self.provisioning_status: + body["provisioning_status"] = self.provisioning_status + if self.triggered_update_status: + body["triggered_update_status"] = self.triggered_update_status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedTableStatus: + """Deserializes the SyncedTableStatus from a dictionary.""" + return cls( + continuous_update_status=_from_dict(d, "continuous_update_status", SyncedTableContinuousUpdateStatus), + detailed_state=_enum(d, "detailed_state", SyncedTableState), + failed_status=_from_dict(d, "failed_status", SyncedTableFailedStatus), + message=d.get("message", None), + provisioning_status=_from_dict(d, "provisioning_status", SyncedTableProvisioningStatus), + triggered_update_status=_from_dict(d, "triggered_update_status", SyncedTableTriggeredUpdateStatus), + ) + + +@dataclass +class SyncedTableTriggeredUpdateStatus: + """Detailed status of a synced table. Shown if the synced table is in the SYNCED_TRIGGERED_UPDATE + or the SYNCED_NO_PENDING_UPDATE state.""" + + last_processed_commit_version: Optional[int] = None + """The last source table Delta version that was synced to the synced table. 
Note that this Delta + version may not be completely synced to the synced table yet.""" + + timestamp: Optional[str] = None + """The timestamp of the last time any data was synchronized from the source table to the synced + table.""" + + triggered_update_progress: Optional[SyncedTablePipelineProgress] = None + """Progress of the active data synchronization pipeline.""" + + def as_dict(self) -> dict: + """Serializes the SyncedTableTriggeredUpdateStatus into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.last_processed_commit_version is not None: + body["last_processed_commit_version"] = self.last_processed_commit_version + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.triggered_update_progress: + body["triggered_update_progress"] = self.triggered_update_progress.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SyncedTableTriggeredUpdateStatus into a shallow dictionary of its immediate attributes.""" + body = {} + if self.last_processed_commit_version is not None: + body["last_processed_commit_version"] = self.last_processed_commit_version + if self.timestamp is not None: + body["timestamp"] = self.timestamp + if self.triggered_update_progress: + body["triggered_update_progress"] = self.triggered_update_progress + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SyncedTableTriggeredUpdateStatus: + """Deserializes the SyncedTableTriggeredUpdateStatus from a dictionary.""" + return cls( + last_processed_commit_version=d.get("last_processed_commit_version", None), + timestamp=d.get("timestamp", None), + triggered_update_progress=_from_dict(d, "triggered_update_progress", SyncedTablePipelineProgress), + ) + + +class DatabaseAPI: + """Database Instances provide access to a database via REST API or direct SQL.""" + + def __init__(self, api_client): + self._api = api_client + + def create_database_catalog(self, catalog: DatabaseCatalog) -> DatabaseCatalog: + """Create a Database Catalog. + + :param catalog: :class:`DatabaseCatalog` + + :returns: :class:`DatabaseCatalog` + """ + body = catalog.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/database/catalogs", body=body, headers=headers) + return DatabaseCatalog.from_dict(res) + + def create_database_instance(self, database_instance: DatabaseInstance) -> DatabaseInstance: + """Create a Database Instance. + + :param database_instance: :class:`DatabaseInstance` + A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. + + :returns: :class:`DatabaseInstance` + """ + body = database_instance.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/database/instances", body=body, headers=headers) + return DatabaseInstance.from_dict(res) + + def create_database_table(self, table: DatabaseTable) -> DatabaseTable: + """Create a Database Table. + + :param table: :class:`DatabaseTable` + Next field marker: 13 + + :returns: :class:`DatabaseTable` + """ + body = table.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/database/tables", body=body, headers=headers) + return DatabaseTable.from_dict(res) + + def create_synced_database_table(self, synced_table: SyncedDatabaseTable) -> SyncedDatabaseTable: + """Create a Synced Database Table. 
+ + :param synced_table: :class:`SyncedDatabaseTable` + Next field marker: 12 + + :returns: :class:`SyncedDatabaseTable` + """ + body = synced_table.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/database/synced_tables", body=body, headers=headers) + return SyncedDatabaseTable.from_dict(res) + + def delete_database_catalog(self, name: str): + """Delete a Database Catalog. + + :param name: str + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/database/catalogs/{name}", headers=headers) + + def delete_database_instance(self, name: str, *, force: Optional[bool] = None, purge: Optional[bool] = None): + """Delete a Database Instance. + + :param name: str + Name of the instance to delete. + :param force: bool (optional) + By default, an instance cannot be deleted if it has descendant instances created via PITR. If this + flag is specified as true, all descendant instances will be deleted as well. + :param purge: bool (optional) + If false, the database instance is soft deleted. Soft deleted instances behave as if they are + deleted, and cannot be used for CRUD operations nor connected to. However, they can be undeleted by + calling the undelete API for a limited time. If true, the database instance is hard deleted and + cannot be undeleted. + + + """ + + query = {} + if force is not None: + query["force"] = force + if purge is not None: + query["purge"] = purge + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/database/instances/{name}", query=query, headers=headers) + + def delete_database_table(self, name: str): + """Delete a Database Table. + + :param name: str + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/database/tables/{name}", headers=headers) + + def delete_synced_database_table(self, name: str): + """Delete a Synced Database Table. + + :param name: str + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/database/synced_tables/{name}", headers=headers) + + def find_database_instance_by_uid(self, *, uid: Optional[str] = None) -> DatabaseInstance: + """Find a Database Instance by uid. + + :param uid: str (optional) + UID of the cluster to get. + + :returns: :class:`DatabaseInstance` + """ + + query = {} + if uid is not None: + query["uid"] = uid + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", "/api/2.0/database/instances:findByUid", query=query, headers=headers) + return DatabaseInstance.from_dict(res) + + def generate_database_credential( + self, *, instance_names: Optional[List[str]] = None, request_id: Optional[str] = None + ) -> DatabaseCredential: + """Generates a credential that can be used to access database instances. + + :param instance_names: List[str] (optional) + Instances to which the token will be scoped. + :param request_id: str (optional) + + :returns: :class:`DatabaseCredential` + """ + body = {} + if instance_names is not None: + body["instance_names"] = [v for v in instance_names] + if request_id is not None: + body["request_id"] = request_id + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/database/credentials", body=body, headers=headers) + return DatabaseCredential.from_dict(res) + + def get_database_catalog(self, name: str) -> DatabaseCatalog: + """Get a Database Catalog.
+ + :param name: str + + :returns: :class:`DatabaseCatalog` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/database/catalogs/{name}", headers=headers) + return DatabaseCatalog.from_dict(res) + + def get_database_instance(self, name: str) -> DatabaseInstance: + """Get a Database Instance. + + :param name: str + Name of the cluster to get. + + :returns: :class:`DatabaseInstance` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/database/instances/{name}", headers=headers) + return DatabaseInstance.from_dict(res) + + def get_database_table(self, name: str) -> DatabaseTable: + """Get a Database Table. + + :param name: str + + :returns: :class:`DatabaseTable` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/database/tables/{name}", headers=headers) + return DatabaseTable.from_dict(res) + + def get_synced_database_table(self, name: str) -> SyncedDatabaseTable: + """Get a Synced Database Table. + + :param name: str + + :returns: :class:`SyncedDatabaseTable` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/database/synced_tables/{name}", headers=headers) + return SyncedDatabaseTable.from_dict(res) + + def list_database_instances( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[DatabaseInstance]: + """List Database Instances. + + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of Database Instances. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseInstance` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do("GET", "/api/2.0/database/instances", query=query, headers=headers) + if "database_instances" in json: + for v in json["database_instances"]: + yield DatabaseInstance.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def update_database_instance( + self, name: str, database_instance: DatabaseInstance, update_mask: str + ) -> DatabaseInstance: + """Update a Database Instance. + + :param name: str + The name of the instance. This is the unique identifier for the instance. + :param database_instance: :class:`DatabaseInstance` + A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. + :param update_mask: str + The list of fields to update. + + :returns: :class:`DatabaseInstance` + """ + body = database_instance.as_dict() + query = {} + if update_mask is not None: + query["update_mask"] = update_mask + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PATCH", f"/api/2.0/database/instances/{name}", query=query, body=body, headers=headers) + return DatabaseInstance.from_dict(res) diff --git a/databricks/sdk/service/files.py b/databricks/sdk/service/files.py index 52496e84b..a50e83a30 100755 --- a/databricks/sdk/service/files.py +++ b/databricks/sdk/service/files.py @@ -1049,6 +1049,8 @@ class FilesAPI: `enable_experimental_files_api_client = True` in your configuration profile or use the environment variable `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`. 
+ Use of Files API may incur Databricks data transfer charges. + [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html""" def __init__(self, api_client): diff --git a/databricks/sdk/service/iam.py b/databricks/sdk/service/iam.py index 0d8c72fe8..3b43f74b0 100755 --- a/databricks/sdk/service/iam.py +++ b/databricks/sdk/service/iam.py @@ -2547,7 +2547,7 @@ def list( seen = set() query["startIndex"] = 1 if "count" not in query: - query["count"] = 100 + query["count"] = 10000 while True: json = self._api.do( "GET", f"/api/2.0/accounts/{self._api.account_id}/scim/v2/Groups", query=query, headers=headers @@ -2827,7 +2827,7 @@ def list( seen = set() query["startIndex"] = 1 if "count" not in query: - query["count"] = 100 + query["count"] = 10000 while True: json = self._api.do( "GET", @@ -3185,7 +3185,7 @@ def list( seen = set() query["startIndex"] = 1 if "count" not in query: - query["count"] = 100 + query["count"] = 10000 while True: json = self._api.do( "GET", f"/api/2.0/accounts/{self._api.account_id}/scim/v2/Users", query=query, headers=headers @@ -3495,7 +3495,7 @@ def list( seen = set() query["startIndex"] = 1 if "count" not in query: - query["count"] = 100 + query["count"] = 10000 while True: json = self._api.do("GET", "/api/2.0/preview/scim/v2/Groups", query=query, headers=headers) if "Resources" in json: @@ -3955,7 +3955,7 @@ def list( seen = set() query["startIndex"] = 1 if "count" not in query: - query["count"] = 100 + query["count"] = 10000 while True: json = self._api.do("GET", "/api/2.0/preview/scim/v2/ServicePrincipals", query=query, headers=headers) if "Resources" in json: @@ -4324,7 +4324,7 @@ def list( seen = set() query["startIndex"] = 1 if "count" not in query: - query["count"] = 100 + query["count"] = 10000 while True: json = self._api.do("GET", "/api/2.0/preview/scim/v2/Users", query=query, headers=headers) if "Resources" in json: diff --git a/databricks/sdk/service/jobs.py b/databricks/sdk/service/jobs.py index 1cb0ac4a7..3d3635e2a 100755 --- a/databricks/sdk/service/jobs.py +++ b/databricks/sdk/service/jobs.py @@ -54,6 +54,9 @@ class BaseJob: """Settings for this job and all of its runs. 
These settings can be updated using the `resetJob` method.""" + trigger_state: Optional[TriggerStateProto] = None + """State of the trigger associated with the job.""" + def as_dict(self) -> dict: """Serializes the BaseJob into a dictionary suitable for use as a JSON request body.""" body = {} @@ -69,6 +72,8 @@ def as_dict(self) -> dict: body["job_id"] = self.job_id if self.settings: body["settings"] = self.settings.as_dict() + if self.trigger_state: + body["trigger_state"] = self.trigger_state.as_dict() return body def as_shallow_dict(self) -> dict: @@ -86,6 +91,8 @@ def as_shallow_dict(self) -> dict: body["job_id"] = self.job_id if self.settings: body["settings"] = self.settings + if self.trigger_state: + body["trigger_state"] = self.trigger_state return body @classmethod @@ -98,6 +105,7 @@ def from_dict(cls, d: Dict[str, Any]) -> BaseJob: has_more=d.get("has_more", None), job_id=d.get("job_id", None), settings=_from_dict(d, "settings", JobSettings), + trigger_state=_from_dict(d, "trigger_state", TriggerStateProto), ) @@ -1389,6 +1397,148 @@ def from_dict(cls, d: Dict[str, Any]) -> DashboardTaskOutput: return cls(page_snapshots=_repeated_dict(d, "page_snapshots", DashboardPageSnapshot)) +@dataclass +class DbtCloudJobRunStep: + """Format of response retrieved from dbt Cloud, for inclusion in output""" + + index: Optional[int] = None + """Orders the steps in the job""" + + logs: Optional[str] = None + """Output of the step""" + + name: Optional[str] = None + """Name of the step in the job""" + + status: Optional[DbtCloudRunStatus] = None + """State of the step""" + + def as_dict(self) -> dict: + """Serializes the DbtCloudJobRunStep into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.index is not None: + body["index"] = self.index + if self.logs is not None: + body["logs"] = self.logs + if self.name is not None: + body["name"] = self.name + if self.status is not None: + body["status"] = self.status.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DbtCloudJobRunStep into a shallow dictionary of its immediate attributes.""" + body = {} + if self.index is not None: + body["index"] = self.index + if self.logs is not None: + body["logs"] = self.logs + if self.name is not None: + body["name"] = self.name + if self.status is not None: + body["status"] = self.status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DbtCloudJobRunStep: + """Deserializes the DbtCloudJobRunStep from a dictionary.""" + return cls( + index=d.get("index", None), + logs=d.get("logs", None), + name=d.get("name", None), + status=_enum(d, "status", DbtCloudRunStatus), + ) + + +class DbtCloudRunStatus(Enum): + """Response enumeration from calling the dbt Cloud API, for inclusion in output""" + + CANCELLED = "CANCELLED" + ERROR = "ERROR" + QUEUED = "QUEUED" + RUNNING = "RUNNING" + STARTING = "STARTING" + SUCCESS = "SUCCESS" + + +@dataclass +class DbtCloudTask: + connection_resource_name: Optional[str] = None + """The resource name of the UC connection that authenticates the dbt Cloud for this task""" + + dbt_cloud_job_id: Optional[int] = None + """Id of the dbt Cloud job to be triggered""" + + def as_dict(self) -> dict: + """Serializes the DbtCloudTask into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.connection_resource_name is not None: + body["connection_resource_name"] = self.connection_resource_name + if self.dbt_cloud_job_id is not None: + body["dbt_cloud_job_id"] = self.dbt_cloud_job_id + return body + 
+ def as_shallow_dict(self) -> dict: + """Serializes the DbtCloudTask into a shallow dictionary of its immediate attributes.""" + body = {} + if self.connection_resource_name is not None: + body["connection_resource_name"] = self.connection_resource_name + if self.dbt_cloud_job_id is not None: + body["dbt_cloud_job_id"] = self.dbt_cloud_job_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DbtCloudTask: + """Deserializes the DbtCloudTask from a dictionary.""" + return cls( + connection_resource_name=d.get("connection_resource_name", None), + dbt_cloud_job_id=d.get("dbt_cloud_job_id", None), + ) + + +@dataclass +class DbtCloudTaskOutput: + dbt_cloud_job_run_id: Optional[int] = None + """Id of the job run in dbt Cloud""" + + dbt_cloud_job_run_output: Optional[List[DbtCloudJobRunStep]] = None + """Steps of the job run as received from dbt Cloud""" + + dbt_cloud_job_run_url: Optional[str] = None + """Url where full run details can be viewed""" + + def as_dict(self) -> dict: + """Serializes the DbtCloudTaskOutput into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.dbt_cloud_job_run_id is not None: + body["dbt_cloud_job_run_id"] = self.dbt_cloud_job_run_id + if self.dbt_cloud_job_run_output: + body["dbt_cloud_job_run_output"] = [v.as_dict() for v in self.dbt_cloud_job_run_output] + if self.dbt_cloud_job_run_url is not None: + body["dbt_cloud_job_run_url"] = self.dbt_cloud_job_run_url + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DbtCloudTaskOutput into a shallow dictionary of its immediate attributes.""" + body = {} + if self.dbt_cloud_job_run_id is not None: + body["dbt_cloud_job_run_id"] = self.dbt_cloud_job_run_id + if self.dbt_cloud_job_run_output: + body["dbt_cloud_job_run_output"] = self.dbt_cloud_job_run_output + if self.dbt_cloud_job_run_url is not None: + body["dbt_cloud_job_run_url"] = self.dbt_cloud_job_run_url + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DbtCloudTaskOutput: + """Deserializes the DbtCloudTaskOutput from a dictionary.""" + return cls( + dbt_cloud_job_run_id=d.get("dbt_cloud_job_run_id", None), + dbt_cloud_job_run_output=_repeated_dict(d, "dbt_cloud_job_run_output", DbtCloudJobRunStep), + dbt_cloud_job_run_url=d.get("dbt_cloud_job_run_url", None), + ) + + @dataclass class DbtOutput: artifacts_headers: Optional[Dict[str, str]] = None @@ -1804,6 +1954,31 @@ def from_dict(cls, d: Dict[str, Any]) -> FileArrivalTriggerConfiguration: ) +@dataclass +class FileArrivalTriggerState: + using_file_events: Optional[bool] = None + """Indicates whether the trigger leverages file events to detect file arrivals.""" + + def as_dict(self) -> dict: + """Serializes the FileArrivalTriggerState into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.using_file_events is not None: + body["using_file_events"] = self.using_file_events + return body + + def as_shallow_dict(self) -> dict: + """Serializes the FileArrivalTriggerState into a shallow dictionary of its immediate attributes.""" + body = {} + if self.using_file_events is not None: + body["using_file_events"] = self.using_file_events + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> FileArrivalTriggerState: + """Deserializes the FileArrivalTriggerState from a dictionary.""" + return cls(using_file_events=d.get("using_file_events", None)) + + @dataclass class ForEachStats: error_message_stats: Optional[List[ForEachTaskErrorMessageStats]] = None @@ -2321,6 +2496,9 @@ class 
Job: """Settings for this job and all of its runs. These settings can be updated using the `resetJob` method.""" + trigger_state: Optional[TriggerStateProto] = None + """State of the trigger associated with the job.""" + def as_dict(self) -> dict: """Serializes the Job into a dictionary suitable for use as a JSON request body.""" body = {} @@ -2340,6 +2518,8 @@ def as_dict(self) -> dict: body["run_as_user_name"] = self.run_as_user_name if self.settings: body["settings"] = self.settings.as_dict() + if self.trigger_state: + body["trigger_state"] = self.trigger_state.as_dict() return body def as_shallow_dict(self) -> dict: @@ -2361,6 +2541,8 @@ def as_shallow_dict(self) -> dict: body["run_as_user_name"] = self.run_as_user_name if self.settings: body["settings"] = self.settings + if self.trigger_state: + body["trigger_state"] = self.trigger_state return body @classmethod @@ -2375,6 +2557,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Job: next_page_token=d.get("next_page_token", None), run_as_user_name=d.get("run_as_user_name", None), settings=_from_dict(d, "settings", JobSettings), + trigger_state=_from_dict(d, "trigger_state", TriggerStateProto), ) @@ -5771,6 +5954,8 @@ class RunOutput: dashboard_output: Optional[DashboardTaskOutput] = None """The output of a dashboard task, if available""" + dbt_cloud_output: Optional[DbtCloudTaskOutput] = None + dbt_output: Optional[DbtOutput] = None """The output of a dbt task, if available.""" @@ -5819,6 +6004,8 @@ def as_dict(self) -> dict: body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output.as_dict() if self.dashboard_output: body["dashboard_output"] = self.dashboard_output.as_dict() + if self.dbt_cloud_output: + body["dbt_cloud_output"] = self.dbt_cloud_output.as_dict() if self.dbt_output: body["dbt_output"] = self.dbt_output.as_dict() if self.error is not None: @@ -5848,6 +6035,8 @@ def as_shallow_dict(self) -> dict: body["clean_rooms_notebook_output"] = self.clean_rooms_notebook_output if self.dashboard_output: body["dashboard_output"] = self.dashboard_output + if self.dbt_cloud_output: + body["dbt_cloud_output"] = self.dbt_cloud_output if self.dbt_output: body["dbt_output"] = self.dbt_output if self.error is not None: @@ -5878,6 +6067,7 @@ def from_dict(cls, d: Dict[str, Any]) -> RunOutput: d, "clean_rooms_notebook_output", CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput ), dashboard_output=_from_dict(d, "dashboard_output", DashboardTaskOutput), + dbt_cloud_output=_from_dict(d, "dbt_cloud_output", DbtCloudTaskOutput), dbt_output=_from_dict(d, "dbt_output", DbtOutput), error=d.get("error", None), error_trace=d.get("error_trace", None), @@ -6197,6 +6387,9 @@ class RunTask: dashboard_task: Optional[DashboardTask] = None """The task refreshes a dashboard and sends a snapshot to subscribers.""" + dbt_cloud_task: Optional[DbtCloudTask] = None + """Task type for dbt cloud""" + dbt_task: Optional[DbtTask] = None """The task runs one or more dbt commands when the `dbt_task` field is present. 
The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.""" @@ -6377,6 +6570,8 @@ def as_dict(self) -> dict: body["condition_task"] = self.condition_task.as_dict() if self.dashboard_task: body["dashboard_task"] = self.dashboard_task.as_dict() + if self.dbt_cloud_task: + body["dbt_cloud_task"] = self.dbt_cloud_task.as_dict() if self.dbt_task: body["dbt_task"] = self.dbt_task.as_dict() if self.depends_on: @@ -6472,6 +6667,8 @@ def as_shallow_dict(self) -> dict: body["condition_task"] = self.condition_task if self.dashboard_task: body["dashboard_task"] = self.dashboard_task + if self.dbt_cloud_task: + body["dbt_cloud_task"] = self.dbt_cloud_task if self.dbt_task: body["dbt_task"] = self.dbt_task if self.depends_on: @@ -6562,6 +6759,7 @@ def from_dict(cls, d: Dict[str, Any]) -> RunTask: cluster_instance=_from_dict(d, "cluster_instance", ClusterInstance), condition_task=_from_dict(d, "condition_task", RunConditionTask), dashboard_task=_from_dict(d, "dashboard_task", DashboardTask), + dbt_cloud_task=_from_dict(d, "dbt_cloud_task", DbtCloudTask), dbt_task=_from_dict(d, "dbt_task", DbtTask), depends_on=_repeated_dict(d, "depends_on", TaskDependency), description=d.get("description", None), @@ -7585,6 +7783,9 @@ class SubmitTask: dashboard_task: Optional[DashboardTask] = None """The task refreshes a dashboard and sends a snapshot to subscribers.""" + dbt_cloud_task: Optional[DbtCloudTask] = None + """Task type for dbt cloud""" + dbt_task: Optional[DbtTask] = None """The task runs one or more dbt commands when the `dbt_task` field is present. The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.""" @@ -7695,6 +7896,8 @@ def as_dict(self) -> dict: body["condition_task"] = self.condition_task.as_dict() if self.dashboard_task: body["dashboard_task"] = self.dashboard_task.as_dict() + if self.dbt_cloud_task: + body["dbt_cloud_task"] = self.dbt_cloud_task.as_dict() if self.dbt_task: body["dbt_task"] = self.dbt_task.as_dict() if self.depends_on: @@ -7756,6 +7959,8 @@ def as_shallow_dict(self) -> dict: body["condition_task"] = self.condition_task if self.dashboard_task: body["dashboard_task"] = self.dashboard_task + if self.dbt_cloud_task: + body["dbt_cloud_task"] = self.dbt_cloud_task if self.dbt_task: body["dbt_task"] = self.dbt_task if self.depends_on: @@ -7815,6 +8020,7 @@ def from_dict(cls, d: Dict[str, Any]) -> SubmitTask: clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask), condition_task=_from_dict(d, "condition_task", ConditionTask), dashboard_task=_from_dict(d, "dashboard_task", DashboardTask), + dbt_cloud_task=_from_dict(d, "dbt_cloud_task", DbtCloudTask), dbt_task=_from_dict(d, "dbt_task", DbtTask), depends_on=_repeated_dict(d, "depends_on", TaskDependency), description=d.get("description", None), @@ -7995,6 +8201,9 @@ class Task: dashboard_task: Optional[DashboardTask] = None """The task refreshes a dashboard and sends a snapshot to subscribers.""" + dbt_cloud_task: Optional[DbtCloudTask] = None + """Task type for dbt cloud""" + dbt_task: Optional[DbtTask] = None """The task runs one or more dbt commands when the `dbt_task` field is present. 
The dbt task requires both Databricks SQL and the ability to use a serverless or a pro SQL warehouse.""" @@ -8130,6 +8339,8 @@ def as_dict(self) -> dict: body["condition_task"] = self.condition_task.as_dict() if self.dashboard_task: body["dashboard_task"] = self.dashboard_task.as_dict() + if self.dbt_cloud_task: + body["dbt_cloud_task"] = self.dbt_cloud_task.as_dict() if self.dbt_task: body["dbt_task"] = self.dbt_task.as_dict() if self.depends_on: @@ -8201,6 +8412,8 @@ def as_shallow_dict(self) -> dict: body["condition_task"] = self.condition_task if self.dashboard_task: body["dashboard_task"] = self.dashboard_task + if self.dbt_cloud_task: + body["dbt_cloud_task"] = self.dbt_cloud_task if self.dbt_task: body["dbt_task"] = self.dbt_task if self.depends_on: @@ -8270,6 +8483,7 @@ def from_dict(cls, d: Dict[str, Any]) -> Task: clean_rooms_notebook_task=_from_dict(d, "clean_rooms_notebook_task", CleanRoomsNotebookTask), condition_task=_from_dict(d, "condition_task", ConditionTask), dashboard_task=_from_dict(d, "dashboard_task", DashboardTask), + dbt_cloud_task=_from_dict(d, "dbt_cloud_task", DbtCloudTask), dbt_task=_from_dict(d, "dbt_task", DbtTask), depends_on=_repeated_dict(d, "depends_on", TaskDependency), description=d.get("description", None), @@ -8715,6 +8929,30 @@ def from_dict(cls, d: Dict[str, Any]) -> TriggerSettings: ) +@dataclass +class TriggerStateProto: + file_arrival: Optional[FileArrivalTriggerState] = None + + def as_dict(self) -> dict: + """Serializes the TriggerStateProto into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.file_arrival: + body["file_arrival"] = self.file_arrival.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TriggerStateProto into a shallow dictionary of its immediate attributes.""" + body = {} + if self.file_arrival: + body["file_arrival"] = self.file_arrival + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TriggerStateProto: + """Deserializes the TriggerStateProto from a dictionary.""" + return cls(file_arrival=_from_dict(d, "file_arrival", FileArrivalTriggerState)) + + class TriggerType(Enum): """The type of trigger that fired this run. 
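The jobs.py changes above add a dbt Cloud task type (`DbtCloudTask`) and a `trigger_state` field on `Job`/`BaseJob`. The following is a minimal usage sketch, not part of the generated diff, assuming a configured `WorkspaceClient`; the connection name and dbt Cloud job id are placeholders, and a real job definition typically needs additional settings.

from databricks.sdk import WorkspaceClient
from databricks.sdk.service import jobs

w = WorkspaceClient()

# Create a job whose single task triggers a dbt Cloud job through a UC connection.
created = w.jobs.create(
    name="dbt-cloud-example",
    tasks=[
        jobs.Task(
            task_key="run_dbt_cloud_job",
            dbt_cloud_task=jobs.DbtCloudTask(
                connection_resource_name="my_dbt_cloud_connection",  # placeholder UC connection name
                dbt_cloud_job_id=12345,  # placeholder dbt Cloud job id
            ),
        )
    ],
)

# The new trigger_state field is populated on jobs that have a file-arrival trigger configured.
job = w.jobs.get(job_id=created.job_id)
if job.trigger_state and job.trigger_state.file_arrival:
    print("file events enabled:", job.trigger_state.file_arrival.using_file_events)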
diff --git a/databricks/sdk/service/ml.py b/databricks/sdk/service/ml.py index 1e500f10d..b5a58078d 100755 --- a/databricks/sdk/service/ml.py +++ b/databricks/sdk/service/ml.py @@ -271,109 +271,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ApproveTransitionRequestResponse: return cls(activity=_from_dict(d, "activity", Activity)) -@dataclass -class ArtifactCredentialInfo: - headers: Optional[List[ArtifactCredentialInfoHttpHeader]] = None - """A collection of HTTP headers that should be specified when uploading to or downloading from the - specified `signed_uri`.""" - - path: Optional[str] = None - """The path, relative to the Run's artifact root location, of the artifact that can be accessed - with the credential.""" - - run_id: Optional[str] = None - """The ID of the MLflow Run containing the artifact that can be accessed with the credential.""" - - signed_uri: Optional[str] = None - """The signed URI credential that provides access to the artifact.""" - - type: Optional[ArtifactCredentialType] = None - """The type of the signed credential URI (e.g., an AWS presigned URL or an Azure Shared Access - Signature URI).""" - - def as_dict(self) -> dict: - """Serializes the ArtifactCredentialInfo into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.headers: - body["headers"] = [v.as_dict() for v in self.headers] - if self.path is not None: - body["path"] = self.path - if self.run_id is not None: - body["run_id"] = self.run_id - if self.signed_uri is not None: - body["signed_uri"] = self.signed_uri - if self.type is not None: - body["type"] = self.type.value - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ArtifactCredentialInfo into a shallow dictionary of its immediate attributes.""" - body = {} - if self.headers: - body["headers"] = self.headers - if self.path is not None: - body["path"] = self.path - if self.run_id is not None: - body["run_id"] = self.run_id - if self.signed_uri is not None: - body["signed_uri"] = self.signed_uri - if self.type is not None: - body["type"] = self.type - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ArtifactCredentialInfo: - """Deserializes the ArtifactCredentialInfo from a dictionary.""" - return cls( - headers=_repeated_dict(d, "headers", ArtifactCredentialInfoHttpHeader), - path=d.get("path", None), - run_id=d.get("run_id", None), - signed_uri=d.get("signed_uri", None), - type=_enum(d, "type", ArtifactCredentialType), - ) - - -@dataclass -class ArtifactCredentialInfoHttpHeader: - name: Optional[str] = None - """The HTTP header name.""" - - value: Optional[str] = None - """The HTTP header value.""" - - def as_dict(self) -> dict: - """Serializes the ArtifactCredentialInfoHttpHeader into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.name is not None: - body["name"] = self.name - if self.value is not None: - body["value"] = self.value - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ArtifactCredentialInfoHttpHeader into a shallow dictionary of its immediate attributes.""" - body = {} - if self.name is not None: - body["name"] = self.name - if self.value is not None: - body["value"] = self.value - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ArtifactCredentialInfoHttpHeader: - """Deserializes the ArtifactCredentialInfoHttpHeader from a dictionary.""" - return cls(name=d.get("name", None), value=d.get("value", None)) - - -class ArtifactCredentialType(Enum): - """The type of a given artifact access 
credential""" - - AWS_PRESIGNED_URL = "AWS_PRESIGNED_URL" - AZURE_ADLS_GEN2_SAS_URI = "AZURE_ADLS_GEN2_SAS_URI" - AZURE_SAS_URI = "AZURE_SAS_URI" - GCP_SIGNED_URL = "GCP_SIGNED_URL" - - class CommentActivityAction(Enum): """An action that a user (with sufficient permissions) could take on a comment. Valid values are: * `EDIT_COMMENT`: Edit the comment @@ -1076,7 +973,8 @@ class CreateRegistryWebhook: job_spec: Optional[JobSpec] = None model_name: Optional[str] = None - """Name of the model whose events would trigger this webhook.""" + """If model name is not specified, a registry-wide webhook is created that listens for the + specified events across all versions of all registered models.""" status: Optional[RegistryWebhookStatus] = None """Enable or disable triggering the webhook, or put the webhook into test mode. The default is @@ -2235,7 +2133,7 @@ def from_dict(cls, d: Dict[str, Any]) -> FileInfo: class FinalizeLoggedModelRequest: status: LoggedModelStatus """Whether or not the model is ready for use. ``"LOGGED_MODEL_UPLOAD_FAILED"`` indicates that - something went wrong when logging the model weights / agent code).""" + something went wrong when logging the model weights / agent code.""" model_id: Optional[str] = None """The ID of the logged model to finalize.""" @@ -2343,56 +2241,6 @@ class ForecastingExperimentState(Enum): SUCCEEDED = "SUCCEEDED" -@dataclass -class GetCredentialsForTraceDataDownloadResponse: - credential_info: Optional[ArtifactCredentialInfo] = None - """The artifact download credentials for the specified trace data.""" - - def as_dict(self) -> dict: - """Serializes the GetCredentialsForTraceDataDownloadResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.credential_info: - body["credential_info"] = self.credential_info.as_dict() - return body - - def as_shallow_dict(self) -> dict: - """Serializes the GetCredentialsForTraceDataDownloadResponse into a shallow dictionary of its immediate attributes.""" - body = {} - if self.credential_info: - body["credential_info"] = self.credential_info - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> GetCredentialsForTraceDataDownloadResponse: - """Deserializes the GetCredentialsForTraceDataDownloadResponse from a dictionary.""" - return cls(credential_info=_from_dict(d, "credential_info", ArtifactCredentialInfo)) - - -@dataclass -class GetCredentialsForTraceDataUploadResponse: - credential_info: Optional[ArtifactCredentialInfo] = None - """The artifact upload credentials for the specified trace data.""" - - def as_dict(self) -> dict: - """Serializes the GetCredentialsForTraceDataUploadResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.credential_info: - body["credential_info"] = self.credential_info.as_dict() - return body - - def as_shallow_dict(self) -> dict: - """Serializes the GetCredentialsForTraceDataUploadResponse into a shallow dictionary of its immediate attributes.""" - body = {} - if self.credential_info: - body["credential_info"] = self.credential_info - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> GetCredentialsForTraceDataUploadResponse: - """Deserializes the GetCredentialsForTraceDataUploadResponse from a dictionary.""" - return cls(credential_info=_from_dict(d, "credential_info", ArtifactCredentialInfo)) - - @dataclass class GetExperimentByNameResponse: experiment: Optional[Experiment] = None @@ -2993,49 +2841,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ListExperimentsResponse: ) 
-@dataclass -class ListLoggedModelArtifactsResponse: - files: Optional[List[FileInfo]] = None - """File location and metadata for artifacts.""" - - next_page_token: Optional[str] = None - """Token that can be used to retrieve the next page of artifact results""" - - root_uri: Optional[str] = None - """Root artifact directory for the logged model.""" - - def as_dict(self) -> dict: - """Serializes the ListLoggedModelArtifactsResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.files: - body["files"] = [v.as_dict() for v in self.files] - if self.next_page_token is not None: - body["next_page_token"] = self.next_page_token - if self.root_uri is not None: - body["root_uri"] = self.root_uri - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ListLoggedModelArtifactsResponse into a shallow dictionary of its immediate attributes.""" - body = {} - if self.files: - body["files"] = self.files - if self.next_page_token is not None: - body["next_page_token"] = self.next_page_token - if self.root_uri is not None: - body["root_uri"] = self.root_uri - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ListLoggedModelArtifactsResponse: - """Deserializes the ListLoggedModelArtifactsResponse from a dictionary.""" - return cls( - files=_repeated_dict(d, "files", FileInfo), - next_page_token=d.get("next_page_token", None), - root_uri=d.get("root_uri", None), - ) - - @dataclass class ListModelsResponse: next_page_token: Optional[str] = None @@ -5494,10 +5299,7 @@ class RunInputs: """Run metrics.""" model_inputs: Optional[List[ModelInput]] = None - """**NOTE**: Experimental: This API field may change or be removed in a future release without - warning. - - Model inputs to the Run.""" + """Model inputs to the Run.""" def as_dict(self) -> dict: """Serializes the RunInputs into a dictionary suitable for use as a JSON request body.""" @@ -7339,7 +7141,7 @@ def finalize_logged_model(self, model_id: str, status: LoggedModelStatus) -> Fin The ID of the logged model to finalize. :param status: :class:`LoggedModelStatus` Whether or not the model is ready for use. ``"LOGGED_MODEL_UPLOAD_FAILED"`` indicates that something - went wrong when logging the model weights / agent code). + went wrong when logging the model weights / agent code. :returns: :class:`FinalizeLoggedModelResponse` """ @@ -7381,38 +7183,6 @@ def get_by_name(self, experiment_name: str) -> GetExperimentByNameResponse: res = self._api.do("GET", "/api/2.0/mlflow/experiments/get-by-name", query=query, headers=headers) return GetExperimentByNameResponse.from_dict(res) - def get_credentials_for_trace_data_download(self, request_id: str) -> GetCredentialsForTraceDataDownloadResponse: - """Get credentials to download trace data. - - :param request_id: str - The ID of the trace to fetch artifact download credentials for. - - :returns: :class:`GetCredentialsForTraceDataDownloadResponse` - """ - - headers = { - "Accept": "application/json", - } - - res = self._api.do("GET", f"/api/2.0/mlflow/traces/{request_id}/credentials-for-data-download", headers=headers) - return GetCredentialsForTraceDataDownloadResponse.from_dict(res) - - def get_credentials_for_trace_data_upload(self, request_id: str) -> GetCredentialsForTraceDataUploadResponse: - """Get credentials to upload trace data. - - :param request_id: str - The ID of the trace to fetch artifact upload credentials for. 
- - :returns: :class:`GetCredentialsForTraceDataUploadResponse` - """ - - headers = { - "Accept": "application/json", - } - - res = self._api.do("GET", f"/api/2.0/mlflow/traces/{request_id}/credentials-for-data-upload", headers=headers) - return GetCredentialsForTraceDataUploadResponse.from_dict(res) - def get_experiment(self, experiment_id: str) -> GetExperimentResponse: """Get an experiment. @@ -7666,41 +7436,6 @@ def list_experiments( return query["page_token"] = json["next_page_token"] - def list_logged_model_artifacts( - self, model_id: str, *, artifact_directory_path: Optional[str] = None, page_token: Optional[str] = None - ) -> ListLoggedModelArtifactsResponse: - """List artifacts for a logged model. - - List artifacts for a logged model. Takes an optional ``artifact_directory_path`` prefix which if - specified, the response contains only artifacts with the specified prefix. - - :param model_id: str - The ID of the logged model for which to list the artifacts. - :param artifact_directory_path: str (optional) - Filter artifacts matching this path (a relative path from the root artifact directory). - :param page_token: str (optional) - Token indicating the page of artifact results to fetch. `page_token` is not supported when listing - artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call - `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports - pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents). - - :returns: :class:`ListLoggedModelArtifactsResponse` - """ - - query = {} - if artifact_directory_path is not None: - query["artifact_directory_path"] = artifact_directory_path - if page_token is not None: - query["page_token"] = page_token - headers = { - "Accept": "application/json", - } - - res = self._api.do( - "GET", f"/api/2.0/mlflow/logged-models/{model_id}/artifacts/directories", query=query, headers=headers - ) - return ListLoggedModelArtifactsResponse.from_dict(res) - def log_batch( self, *, @@ -8949,7 +8684,8 @@ def create_webhook( :param http_url_spec: :class:`HttpUrlSpec` (optional) :param job_spec: :class:`JobSpec` (optional) :param model_name: str (optional) - Name of the model whose events would trigger this webhook. + If model name is not specified, a registry-wide webhook is created that listens for the specified + events across all versions of all registered models. :param status: :class:`RegistryWebhookStatus` (optional) Enable or disable triggering the webhook, or put the webhook into test mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an associated event happens. @@ -8988,6 +8724,7 @@ def delete_comment(self, id: str): Deletes a comment on a model version. :param id: str + Unique identifier of an activity """ diff --git a/databricks/sdk/service/pipelines.py b/databricks/sdk/service/pipelines.py index 943810a33..ef4363bb8 100755 --- a/databricks/sdk/service/pipelines.py +++ b/databricks/sdk/service/pipelines.py @@ -111,6 +111,11 @@ class CreatePipeline: storage: Optional[str] = None """DBFS root directory for storing checkpoints and tables.""" + tags: Optional[Dict[str, str]] = None + """A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + pipeline.""" + target: Optional[str] = None """Target schema (database) to add tables in this pipeline to. 
Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is @@ -174,6 +179,8 @@ def as_dict(self) -> dict: body["serverless"] = self.serverless if self.storage is not None: body["storage"] = self.storage + if self.tags: + body["tags"] = self.tags if self.target is not None: body["target"] = self.target if self.trigger: @@ -235,6 +242,8 @@ def as_shallow_dict(self) -> dict: body["serverless"] = self.serverless if self.storage is not None: body["storage"] = self.storage + if self.tags: + body["tags"] = self.tags if self.target is not None: body["target"] = self.target if self.trigger: @@ -271,6 +280,7 @@ def from_dict(cls, d: Dict[str, Any]) -> CreatePipeline: schema=d.get("schema", None), serverless=d.get("serverless", None), storage=d.get("storage", None), + tags=d.get("tags", None), target=d.get("target", None), trigger=_from_dict(d, "trigger", PipelineTrigger), ) @@ -505,6 +515,11 @@ class EditPipeline: storage: Optional[str] = None """DBFS root directory for storing checkpoints and tables.""" + tags: Optional[Dict[str, str]] = None + """A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + pipeline.""" + target: Optional[str] = None """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is @@ -570,6 +585,8 @@ def as_dict(self) -> dict: body["serverless"] = self.serverless if self.storage is not None: body["storage"] = self.storage + if self.tags: + body["tags"] = self.tags if self.target is not None: body["target"] = self.target if self.trigger: @@ -633,6 +650,8 @@ def as_shallow_dict(self) -> dict: body["serverless"] = self.serverless if self.storage is not None: body["storage"] = self.storage + if self.tags: + body["tags"] = self.tags if self.target is not None: body["target"] = self.target if self.trigger: @@ -670,6 +689,7 @@ def from_dict(cls, d: Dict[str, Any]) -> EditPipeline: schema=d.get("schema", None), serverless=d.get("serverless", None), storage=d.get("storage", None), + tags=d.get("tags", None), target=d.get("target", None), trigger=_from_dict(d, "trigger", PipelineTrigger), ) @@ -1186,6 +1206,7 @@ class IngestionSourceType(Enum): SERVICENOW = "SERVICENOW" SHAREPOINT = "SHAREPOINT" SQLSERVER = "SQLSERVER" + TERADATA = "TERADATA" WORKDAY_RAAS = "WORKDAY_RAAS" @@ -2386,6 +2407,11 @@ class PipelineSpec: storage: Optional[str] = None """DBFS root directory for storing checkpoints and tables.""" + tags: Optional[Dict[str, str]] = None + """A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, + and are therefore subject to the same limitations. A maximum of 25 tags can be added to the + pipeline.""" + target: Optional[str] = None """Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. 
This legacy field is @@ -2443,6 +2469,8 @@ def as_dict(self) -> dict: body["serverless"] = self.serverless if self.storage is not None: body["storage"] = self.storage + if self.tags: + body["tags"] = self.tags if self.target is not None: body["target"] = self.target if self.trigger: @@ -2498,6 +2526,8 @@ def as_shallow_dict(self) -> dict: body["serverless"] = self.serverless if self.storage is not None: body["storage"] = self.storage + if self.tags: + body["tags"] = self.tags if self.target is not None: body["target"] = self.target if self.trigger: @@ -2531,6 +2561,7 @@ def from_dict(cls, d: Dict[str, Any]) -> PipelineSpec: schema=d.get("schema", None), serverless=d.get("serverless", None), storage=d.get("storage", None), + tags=d.get("tags", None), target=d.get("target", None), trigger=_from_dict(d, "trigger", PipelineTrigger), ) @@ -3568,6 +3599,7 @@ def create( schema: Optional[str] = None, serverless: Optional[bool] = None, storage: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, target: Optional[str] = None, trigger: Optional[PipelineTrigger] = None, ) -> CreatePipelineResponse: @@ -3636,6 +3668,9 @@ def create( Whether serverless compute is enabled for this pipeline. :param storage: str (optional) DBFS root directory for storing checkpoints and tables. + :param tags: Dict[str,str] (optional) + A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and + are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline. :param target: str (optional) Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated @@ -3698,6 +3733,8 @@ def create( body["serverless"] = serverless if storage is not None: body["storage"] = storage + if tags is not None: + body["tags"] = tags if target is not None: body["target"] = target if trigger is not None: @@ -3713,7 +3750,8 @@ def create( def delete(self, pipeline_id: str): """Delete a pipeline. - Deletes a pipeline. + Deletes a pipeline. Deleting a pipeline is a permanent action that stops and removes the pipeline and + its tables. You cannot undo this action. :param pipeline_id: str @@ -4083,6 +4121,7 @@ def update( schema: Optional[str] = None, serverless: Optional[bool] = None, storage: Optional[str] = None, + tags: Optional[Dict[str, str]] = None, target: Optional[str] = None, trigger: Optional[PipelineTrigger] = None, ): @@ -4154,6 +4193,9 @@ def update( Whether serverless compute is enabled for this pipeline. :param storage: str (optional) DBFS root directory for storing checkpoints and tables. + :param tags: Dict[str,str] (optional) + A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and + are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline. :param target: str (optional) Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. 
This legacy field is deprecated @@ -4216,6 +4258,8 @@ def update( body["serverless"] = serverless if storage is not None: body["storage"] = storage + if tags is not None: + body["tags"] = tags if target is not None: body["target"] = target if trigger is not None: diff --git a/databricks/sdk/service/provisioning.py b/databricks/sdk/service/provisioning.py index 42feb57bc..e56c0c382 100755 --- a/databricks/sdk/service/provisioning.py +++ b/databricks/sdk/service/provisioning.py @@ -779,9 +779,6 @@ def from_dict(cls, d: Dict[str, Any]) -> Credential: ) -CustomTags = Dict[str, str] - - @dataclass class CustomerFacingGcpCloudResourceContainer: """The general workspace configurations that are specific to Google Cloud.""" diff --git a/databricks/sdk/service/qualitymonitorv2.py b/databricks/sdk/service/qualitymonitorv2.py new file mode 100755 index 000000000..bf3ef953f --- /dev/null +++ b/databricks/sdk/service/qualitymonitorv2.py @@ -0,0 +1,275 @@ +# Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. + +from __future__ import annotations + +import logging +from dataclasses import dataclass +from enum import Enum +from typing import Any, Dict, Iterator, List, Optional + +from ._internal import _enum, _from_dict, _repeated_dict + +_LOG = logging.getLogger("databricks.sdk") + + +# all definitions in this file are in alphabetical order + + +@dataclass +class AnomalyDetectionConfig: + last_run_id: Optional[str] = None + """Run id of the last run of the workflow""" + + latest_run_status: Optional[AnomalyDetectionRunStatus] = None + """The status of the last run of the workflow.""" + + def as_dict(self) -> dict: + """Serializes the AnomalyDetectionConfig into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.last_run_id is not None: + body["last_run_id"] = self.last_run_id + if self.latest_run_status is not None: + body["latest_run_status"] = self.latest_run_status.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the AnomalyDetectionConfig into a shallow dictionary of its immediate attributes.""" + body = {} + if self.last_run_id is not None: + body["last_run_id"] = self.last_run_id + if self.latest_run_status is not None: + body["latest_run_status"] = self.latest_run_status + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> AnomalyDetectionConfig: + """Deserializes the AnomalyDetectionConfig from a dictionary.""" + return cls( + last_run_id=d.get("last_run_id", None), + latest_run_status=_enum(d, "latest_run_status", AnomalyDetectionRunStatus), + ) + + +class AnomalyDetectionRunStatus(Enum): + """Status of Anomaly Detection Job Run""" + + ANOMALY_DETECTION_RUN_STATUS_CANCELED = "ANOMALY_DETECTION_RUN_STATUS_CANCELED" + ANOMALY_DETECTION_RUN_STATUS_FAILED = "ANOMALY_DETECTION_RUN_STATUS_FAILED" + ANOMALY_DETECTION_RUN_STATUS_JOB_DELETED = "ANOMALY_DETECTION_RUN_STATUS_JOB_DELETED" + ANOMALY_DETECTION_RUN_STATUS_PENDING = "ANOMALY_DETECTION_RUN_STATUS_PENDING" + ANOMALY_DETECTION_RUN_STATUS_RUNNING = "ANOMALY_DETECTION_RUN_STATUS_RUNNING" + ANOMALY_DETECTION_RUN_STATUS_SUCCESS = "ANOMALY_DETECTION_RUN_STATUS_SUCCESS" + ANOMALY_DETECTION_RUN_STATUS_UNKNOWN = "ANOMALY_DETECTION_RUN_STATUS_UNKNOWN" + ANOMALY_DETECTION_RUN_STATUS_WORKSPACE_MISMATCH_ERROR = "ANOMALY_DETECTION_RUN_STATUS_WORKSPACE_MISMATCH_ERROR" + + +@dataclass +class DeleteQualityMonitorResponse: + def as_dict(self) -> dict: + """Serializes the DeleteQualityMonitorResponse into a dictionary suitable for use as a JSON request body.""" + body 
= {} + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DeleteQualityMonitorResponse into a shallow dictionary of its immediate attributes.""" + body = {} + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DeleteQualityMonitorResponse: + """Deserializes the DeleteQualityMonitorResponse from a dictionary.""" + return cls() + + +@dataclass +class ListQualityMonitorResponse: + next_page_token: Optional[str] = None + + quality_monitors: Optional[List[QualityMonitor]] = None + + def as_dict(self) -> dict: + """Serializes the ListQualityMonitorResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.quality_monitors: + body["quality_monitors"] = [v.as_dict() for v in self.quality_monitors] + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListQualityMonitorResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + if self.quality_monitors: + body["quality_monitors"] = self.quality_monitors + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListQualityMonitorResponse: + """Deserializes the ListQualityMonitorResponse from a dictionary.""" + return cls( + next_page_token=d.get("next_page_token", None), + quality_monitors=_repeated_dict(d, "quality_monitors", QualityMonitor), + ) + + +@dataclass +class QualityMonitor: + object_type: str + """The type of the monitored object. Can be one of the following: schema.""" + + object_id: str + """The uuid of the request object. For example, schema id.""" + + anomaly_detection_config: Optional[AnomalyDetectionConfig] = None + + def as_dict(self) -> dict: + """Serializes the QualityMonitor into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.anomaly_detection_config: + body["anomaly_detection_config"] = self.anomaly_detection_config.as_dict() + if self.object_id is not None: + body["object_id"] = self.object_id + if self.object_type is not None: + body["object_type"] = self.object_type + return body + + def as_shallow_dict(self) -> dict: + """Serializes the QualityMonitor into a shallow dictionary of its immediate attributes.""" + body = {} + if self.anomaly_detection_config: + body["anomaly_detection_config"] = self.anomaly_detection_config + if self.object_id is not None: + body["object_id"] = self.object_id + if self.object_type is not None: + body["object_type"] = self.object_type + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> QualityMonitor: + """Deserializes the QualityMonitor from a dictionary.""" + return cls( + anomaly_detection_config=_from_dict(d, "anomaly_detection_config", AnomalyDetectionConfig), + object_id=d.get("object_id", None), + object_type=d.get("object_type", None), + ) + + +class QualityMonitorV2API: + """Manage data quality of UC objects (currently support `schema`)""" + + def __init__(self, api_client): + self._api = api_client + + def create_quality_monitor(self, quality_monitor: QualityMonitor) -> QualityMonitor: + """Create a quality monitor. 
+ + Create a quality monitor on UC object + + :param quality_monitor: :class:`QualityMonitor` + + :returns: :class:`QualityMonitor` + """ + body = quality_monitor.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("POST", "/api/2.0/quality-monitors", body=body, headers=headers) + return QualityMonitor.from_dict(res) + + def delete_quality_monitor(self, object_type: str, object_id: str): + """Delete a quality monitor. + + Delete a quality monitor on UC object + + :param object_type: str + The type of the monitored object. Can be one of the following: schema. + :param object_id: str + The uuid of the request object. For example, schema id. + + + """ + + headers = { + "Accept": "application/json", + } + + self._api.do("DELETE", f"/api/2.0/quality-monitors/{object_type}/{object_id}", headers=headers) + + def get_quality_monitor(self, object_type: str, object_id: str) -> QualityMonitor: + """Read a quality monitor. + + Read a quality monitor on UC object + + :param object_type: str + The type of the monitored object. Can be one of the following: schema. + :param object_id: str + The uuid of the request object. For example, schema id. + + :returns: :class:`QualityMonitor` + """ + + headers = { + "Accept": "application/json", + } + + res = self._api.do("GET", f"/api/2.0/quality-monitors/{object_type}/{object_id}", headers=headers) + return QualityMonitor.from_dict(res) + + def list_quality_monitor( + self, *, page_size: Optional[int] = None, page_token: Optional[str] = None + ) -> Iterator[QualityMonitor]: + """List quality monitors. + + (Unimplemented) List quality monitors + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`QualityMonitor` + """ + + query = {} + if page_size is not None: + query["page_size"] = page_size + if page_token is not None: + query["page_token"] = page_token + headers = { + "Accept": "application/json", + } + + while True: + json = self._api.do("GET", "/api/2.0/quality-monitors", query=query, headers=headers) + if "quality_monitors" in json: + for v in json["quality_monitors"]: + yield QualityMonitor.from_dict(v) + if "next_page_token" not in json or not json["next_page_token"]: + return + query["page_token"] = json["next_page_token"] + + def update_quality_monitor( + self, object_type: str, object_id: str, quality_monitor: QualityMonitor + ) -> QualityMonitor: + """Update a quality monitor. + + (Unimplemented) Update a quality monitor on UC object + + :param object_type: str + The type of the monitored object. Can be one of the following: schema. + :param object_id: str + The uuid of the request object. For example, schema id. 
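Taken together, QualityMonitorV2API exposes a small CRUD surface over the `/api/2.0/quality-monitors` routes. A hedged sketch of how it might be used, assuming the service is exposed as `w.quality_monitor_v2` on WorkspaceClient (the schema UUID is a placeholder):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.qualitymonitorv2 import QualityMonitor

w = WorkspaceClient()

# Create an anomaly-detection monitor on a UC schema (object_id is a placeholder UUID).
monitor = w.quality_monitor_v2.create_quality_monitor(
    quality_monitor=QualityMonitor(object_type="schema", object_id="01234567-89ab-cdef-0123-456789abcdef")
)

# Read it back and enumerate all monitors; list_quality_monitor follows next_page_token transparently.
fetched = w.quality_monitor_v2.get_quality_monitor(object_type="schema", object_id=monitor.object_id)
for m in w.quality_monitor_v2.list_quality_monitor():
    print(m.object_type, m.object_id, m.anomaly_detection_config)

# Remove the monitor when it is no longer needed.
w.quality_monitor_v2.delete_quality_monitor(object_type="schema", object_id=monitor.object_id)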
+ :param quality_monitor: :class:`QualityMonitor` + + :returns: :class:`QualityMonitor` + """ + body = quality_monitor.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do("PUT", f"/api/2.0/quality-monitors/{object_type}/{object_id}", body=body, headers=headers) + return QualityMonitor.from_dict(res) diff --git a/databricks/sdk/service/serving.py b/databricks/sdk/service/serving.py index 6feb1fa01..8d8c09ff8 100755 --- a/databricks/sdk/service/serving.py +++ b/databricks/sdk/service/serving.py @@ -3005,9 +3005,17 @@ class ServedEntityInput: instance_profile_arn: Optional[str] = None """ARN of the instance profile that the served entity uses to access AWS resources.""" + max_provisioned_concurrency: Optional[int] = None + """The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + workload_size is specified.""" + max_provisioned_throughput: Optional[int] = None """The maximum tokens per second that the endpoint can scale up to.""" + min_provisioned_concurrency: Optional[int] = None + """The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + workload_size is specified.""" + min_provisioned_throughput: Optional[int] = None """The minimum tokens per second that the endpoint can scale down to.""" @@ -3030,7 +3038,7 @@ class ServedEntityInput: "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size - is 0.""" + is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.""" workload_type: Optional[ServingModelWorkloadType] = None """The workload type of the served entity. 
The workload type selects which type of compute to use @@ -3053,8 +3061,12 @@ def as_dict(self) -> dict: body["external_model"] = self.external_model.as_dict() if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency if self.max_provisioned_throughput is not None: body["max_provisioned_throughput"] = self.max_provisioned_throughput + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.min_provisioned_throughput is not None: body["min_provisioned_throughput"] = self.min_provisioned_throughput if self.name is not None: @@ -3082,8 +3094,12 @@ def as_shallow_dict(self) -> dict: body["external_model"] = self.external_model if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency if self.max_provisioned_throughput is not None: body["max_provisioned_throughput"] = self.max_provisioned_throughput + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.min_provisioned_throughput is not None: body["min_provisioned_throughput"] = self.min_provisioned_throughput if self.name is not None: @@ -3107,7 +3123,9 @@ def from_dict(cls, d: Dict[str, Any]) -> ServedEntityInput: environment_vars=d.get("environment_vars", None), external_model=_from_dict(d, "external_model", ExternalModel), instance_profile_arn=d.get("instance_profile_arn", None), + max_provisioned_concurrency=d.get("max_provisioned_concurrency", None), max_provisioned_throughput=d.get("max_provisioned_throughput", None), + min_provisioned_concurrency=d.get("min_provisioned_concurrency", None), min_provisioned_throughput=d.get("min_provisioned_throughput", None), name=d.get("name", None), provisioned_model_units=d.get("provisioned_model_units", None), @@ -3152,9 +3170,17 @@ class ServedEntityOutput: instance_profile_arn: Optional[str] = None """ARN of the instance profile that the served entity uses to access AWS resources.""" + max_provisioned_concurrency: Optional[int] = None + """The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + workload_size is specified.""" + max_provisioned_throughput: Optional[int] = None """The maximum tokens per second that the endpoint can scale up to.""" + min_provisioned_concurrency: Optional[int] = None + """The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + workload_size is specified.""" + min_provisioned_throughput: Optional[int] = None """The minimum tokens per second that the endpoint can scale down to.""" @@ -3179,7 +3205,7 @@ class ServedEntityOutput: "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size - is 0.""" + is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.""" workload_type: Optional[ServingModelWorkloadType] = None """The workload type of the served entity. 
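The new min_provisioned_concurrency / max_provisioned_concurrency fields give an explicit concurrency range as an alternative to the coarse workload_size buckets, and per the docstrings the two styles should not be combined on the same served entity. A minimal sketch of creating an endpoint with an explicit range (endpoint name, entity name and version are placeholders):

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.serving import EndpointCoreConfigInput, ServedEntityInput

w = WorkspaceClient()

# Explicit concurrency bounds replace workload_size; do not set both on the same entity.
w.serving_endpoints.create(
    name="my-endpoint",
    config=EndpointCoreConfigInput(
        served_entities=[
            ServedEntityInput(
                entity_name="main.default.my_model",  # placeholder UC model
                entity_version="1",
                scale_to_zero_enabled=True,
                min_provisioned_concurrency=0,
                max_provisioned_concurrency=8,
            )
        ]
    ),
)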
The workload type selects which type of compute to use @@ -3208,8 +3234,12 @@ def as_dict(self) -> dict: body["foundation_model"] = self.foundation_model.as_dict() if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency if self.max_provisioned_throughput is not None: body["max_provisioned_throughput"] = self.max_provisioned_throughput + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.min_provisioned_throughput is not None: body["min_provisioned_throughput"] = self.min_provisioned_throughput if self.name is not None: @@ -3245,8 +3275,12 @@ def as_shallow_dict(self) -> dict: body["foundation_model"] = self.foundation_model if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency if self.max_provisioned_throughput is not None: body["max_provisioned_throughput"] = self.max_provisioned_throughput + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.min_provisioned_throughput is not None: body["min_provisioned_throughput"] = self.min_provisioned_throughput if self.name is not None: @@ -3275,7 +3309,9 @@ def from_dict(cls, d: Dict[str, Any]) -> ServedEntityOutput: external_model=_from_dict(d, "external_model", ExternalModel), foundation_model=_from_dict(d, "foundation_model", FoundationModel), instance_profile_arn=d.get("instance_profile_arn", None), + max_provisioned_concurrency=d.get("max_provisioned_concurrency", None), max_provisioned_throughput=d.get("max_provisioned_throughput", None), + min_provisioned_concurrency=d.get("min_provisioned_concurrency", None), min_provisioned_throughput=d.get("min_provisioned_throughput", None), name=d.get("name", None), provisioned_model_units=d.get("provisioned_model_units", None), @@ -3360,9 +3396,17 @@ class ServedModelInput: instance_profile_arn: Optional[str] = None """ARN of the instance profile that the served entity uses to access AWS resources.""" + max_provisioned_concurrency: Optional[int] = None + """The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + workload_size is specified.""" + max_provisioned_throughput: Optional[int] = None """The maximum tokens per second that the endpoint can scale up to.""" + min_provisioned_concurrency: Optional[int] = None + """The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + workload_size is specified.""" + min_provisioned_throughput: Optional[int] = None """The minimum tokens per second that the endpoint can scale down to.""" @@ -3382,7 +3426,7 @@ class ServedModelInput: "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size - is 0.""" + is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.""" workload_type: Optional[ServedModelInputWorkloadType] = None """The workload type of the served entity. 
The workload type selects which type of compute to use @@ -3399,8 +3443,12 @@ def as_dict(self) -> dict: body["environment_vars"] = self.environment_vars if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency if self.max_provisioned_throughput is not None: body["max_provisioned_throughput"] = self.max_provisioned_throughput + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.min_provisioned_throughput is not None: body["min_provisioned_throughput"] = self.min_provisioned_throughput if self.model_name is not None: @@ -3426,8 +3474,12 @@ def as_shallow_dict(self) -> dict: body["environment_vars"] = self.environment_vars if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency if self.max_provisioned_throughput is not None: body["max_provisioned_throughput"] = self.max_provisioned_throughput + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.min_provisioned_throughput is not None: body["min_provisioned_throughput"] = self.min_provisioned_throughput if self.model_name is not None: @@ -3452,7 +3504,9 @@ def from_dict(cls, d: Dict[str, Any]) -> ServedModelInput: return cls( environment_vars=d.get("environment_vars", None), instance_profile_arn=d.get("instance_profile_arn", None), + max_provisioned_concurrency=d.get("max_provisioned_concurrency", None), max_provisioned_throughput=d.get("max_provisioned_throughput", None), + min_provisioned_concurrency=d.get("min_provisioned_concurrency", None), min_provisioned_throughput=d.get("min_provisioned_throughput", None), model_name=d.get("model_name", None), model_version=d.get("model_version", None), @@ -3489,6 +3543,14 @@ class ServedModelOutput: instance_profile_arn: Optional[str] = None """ARN of the instance profile that the served entity uses to access AWS resources.""" + max_provisioned_concurrency: Optional[int] = None + """The maximum provisioned concurrency that the endpoint can scale up to. Do not use if + workload_size is specified.""" + + min_provisioned_concurrency: Optional[int] = None + """The minimum provisioned concurrency that the endpoint can scale down to. Do not use if + workload_size is specified.""" + model_name: Optional[str] = None model_version: Optional[str] = None @@ -3514,7 +3576,7 @@ class ServedModelOutput: "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). Additional custom workload sizes can also be used when available in the workspace. If scale-to-zero is enabled, the lower bound of the provisioned concurrency for each workload size - is 0.""" + is 0. Do not use if min_provisioned_concurrency and max_provisioned_concurrency are specified.""" workload_type: Optional[ServingModelWorkloadType] = None """The workload type of the served entity. 
The workload type selects which type of compute to use @@ -3535,6 +3597,10 @@ def as_dict(self) -> dict: body["environment_vars"] = self.environment_vars if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.model_name is not None: body["model_name"] = self.model_name if self.model_version is not None: @@ -3564,6 +3630,10 @@ def as_shallow_dict(self) -> dict: body["environment_vars"] = self.environment_vars if self.instance_profile_arn is not None: body["instance_profile_arn"] = self.instance_profile_arn + if self.max_provisioned_concurrency is not None: + body["max_provisioned_concurrency"] = self.max_provisioned_concurrency + if self.min_provisioned_concurrency is not None: + body["min_provisioned_concurrency"] = self.min_provisioned_concurrency if self.model_name is not None: body["model_name"] = self.model_name if self.model_version is not None: @@ -3590,6 +3660,8 @@ def from_dict(cls, d: Dict[str, Any]) -> ServedModelOutput: creator=d.get("creator", None), environment_vars=d.get("environment_vars", None), instance_profile_arn=d.get("instance_profile_arn", None), + max_provisioned_concurrency=d.get("max_provisioned_concurrency", None), + min_provisioned_concurrency=d.get("min_provisioned_concurrency", None), model_name=d.get("model_name", None), model_version=d.get("model_version", None), name=d.get("name", None), diff --git a/databricks/sdk/service/settings.py b/databricks/sdk/service/settings.py index 70b3bc0a6..3bdbffb31 100755 --- a/databricks/sdk/service/settings.py +++ b/databricks/sdk/service/settings.py @@ -963,31 +963,45 @@ class CreatePrivateEndpointRule: """Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure portal after initialization.""" - resource_id: str - """The Azure resource ID of the target resource.""" - domain_names: Optional[List[str]] = None - """Only used by private endpoints to customer-managed resources. + """Only used by private endpoints to customer-managed private endpoint services. Domain names of target private link service. When updating this field, the full list of target domain_names must be specified.""" + endpoint_service: Optional[str] = None + """The full target AWS endpoint service name that connects to the destination resources of the + private endpoint.""" + group_id: Optional[str] = None - """Only used by private endpoints to Azure first-party services. Enum: blob | dfs | sqlServer | - mysqlServer + """Not used by customer-managed private endpoint services. The sub-resource type (group ID) of the target resource. Note that to connect to workspace root storage (root DBFS), you need two endpoints, one for blob and one for dfs.""" + resource_id: Optional[str] = None + """The Azure resource ID of the target resource.""" + + resource_names: Optional[List[str]] = None + """Only used by private endpoints towards AWS S3 service. + + The globally unique S3 bucket names that will be accessed via the VPC endpoint. The bucket names + must be in the same region as the NCC/endpoint service. When updating this field, we perform + full update on this field. 
Please ensure a full list of desired resource_names is provided.""" + def as_dict(self) -> dict: """Serializes the CreatePrivateEndpointRule into a dictionary suitable for use as a JSON request body.""" body = {} if self.domain_names: body["domain_names"] = [v for v in self.domain_names] + if self.endpoint_service is not None: + body["endpoint_service"] = self.endpoint_service if self.group_id is not None: body["group_id"] = self.group_id if self.resource_id is not None: body["resource_id"] = self.resource_id + if self.resource_names: + body["resource_names"] = [v for v in self.resource_names] return body def as_shallow_dict(self) -> dict: @@ -995,10 +1009,14 @@ def as_shallow_dict(self) -> dict: body = {} if self.domain_names: body["domain_names"] = self.domain_names + if self.endpoint_service is not None: + body["endpoint_service"] = self.endpoint_service if self.group_id is not None: body["group_id"] = self.group_id if self.resource_id is not None: body["resource_id"] = self.resource_id + if self.resource_names: + body["resource_names"] = self.resource_names return body @classmethod @@ -1006,8 +1024,10 @@ def from_dict(cls, d: Dict[str, Any]) -> CreatePrivateEndpointRule: """Deserializes the CreatePrivateEndpointRule from a dictionary.""" return cls( domain_names=d.get("domain_names", None), + endpoint_service=d.get("endpoint_service", None), group_id=d.get("group_id", None), resource_id=d.get("resource_id", None), + resource_names=d.get("resource_names", None), ) @@ -1166,6 +1186,219 @@ def from_dict(cls, d: Dict[str, Any]) -> CspEnablementAccountSetting: ) +@dataclass +class CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule: + """Properties of the new private endpoint rule. Note that for private endpoints towards a VPC + endpoint service behind a customer-managed NLB, you must approve the endpoint in AWS console + after initialization.""" + + account_id: Optional[str] = None + """Databricks account ID. You can find your account ID from the Accounts Console.""" + + connection_state: Optional[ + CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState + ] = None + """The current status of this private endpoint. The private endpoint rules are effective only if + the connection state is ESTABLISHED. Remember that you must approve new endpoints on your + resources in the AWS console before they take effect. The possible values are: - PENDING: The + endpoint has been created and pending approval. - ESTABLISHED: The endpoint has been approved + and is ready to use in your serverless compute resources. - REJECTED: Connection was rejected by + the private link resource owner. - DISCONNECTED: Connection was removed by the private link + resource owner, the private endpoint becomes informative and should be deleted for clean-up. - + EXPIRED: If the endpoint is created but not approved in 14 days, it is EXPIRED.""" + + creation_time: Optional[int] = None + """Time in epoch milliseconds when this object was created.""" + + deactivated: Optional[bool] = None + """Whether this private endpoint is deactivated.""" + + deactivated_at: Optional[int] = None + """Time in epoch milliseconds when this object was deactivated.""" + + domain_names: Optional[List[str]] = None + """Only used by private endpoints towards a VPC endpoint service for customer-managed VPC endpoint + service. + + The target AWS resource FQDNs accessible via the VPC endpoint service. When updating this field, + we perform full update on this field. 
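CreatePrivateEndpointRule now carries AWS-oriented fields (endpoint_service, resource_names) alongside the original Azure ones, with resource_id becoming optional accordingly. The create/update call on the account-level network_connectivity service is not part of this hunk, so the sketch below only illustrates the request shape; the bucket, service name and Azure resource ID are placeholders:

from databricks.sdk.service.settings import CreatePrivateEndpointRule

# Rule targeting AWS S3: the endpoint service plus the full list of bucket names.
s3_rule = CreatePrivateEndpointRule(
    endpoint_service="com.amazonaws.us-west-2.s3",  # placeholder service name
    resource_names=["my-serverless-bucket"],        # placeholder bucket
)

# Rule targeting an Azure first-party resource keeps the original fields.
azure_rule = CreatePrivateEndpointRule(
    resource_id="/subscriptions/0000/resourceGroups/rg/providers/Microsoft.Storage/storageAccounts/myacct",
    group_id="blob",
)

print(s3_rule.as_dict())
print(azure_rule.as_dict())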
Please ensure a full list of desired domain_names is + provided.""" + + enabled: Optional[bool] = None + """Only used by private endpoints towards an AWS S3 service. + + Update this field to activate/deactivate this private endpoint to allow egress access from + serverless compute resources.""" + + endpoint_service: Optional[str] = None + """The full target AWS endpoint service name that connects to the destination resources of the + private endpoint.""" + + network_connectivity_config_id: Optional[str] = None + """The ID of a network connectivity configuration, which is the parent resource of this private + endpoint rule object.""" + + resource_names: Optional[List[str]] = None + """Only used by private endpoints towards AWS S3 service. + + The globally unique S3 bucket names that will be accessed via the VPC endpoint. The bucket names + must be in the same region as the NCC/endpoint service. When updating this field, we perform + full update on this field. Please ensure a full list of desired resource_names is provided.""" + + rule_id: Optional[str] = None + """The ID of a private endpoint rule.""" + + updated_time: Optional[int] = None + """Time in epoch milliseconds when this object was updated.""" + + vpc_endpoint_id: Optional[str] = None + """The AWS VPC endpoint ID. You can use this ID to identify VPC endpoint created by Databricks.""" + + def as_dict(self) -> dict: + """Serializes the CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.connection_state is not None: + body["connection_state"] = self.connection_state.value + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.deactivated is not None: + body["deactivated"] = self.deactivated + if self.deactivated_at is not None: + body["deactivated_at"] = self.deactivated_at + if self.domain_names: + body["domain_names"] = [v for v in self.domain_names] + if self.enabled is not None: + body["enabled"] = self.enabled + if self.endpoint_service is not None: + body["endpoint_service"] = self.endpoint_service + if self.network_connectivity_config_id is not None: + body["network_connectivity_config_id"] = self.network_connectivity_config_id + if self.resource_names: + body["resource_names"] = [v for v in self.resource_names] + if self.rule_id is not None: + body["rule_id"] = self.rule_id + if self.updated_time is not None: + body["updated_time"] = self.updated_time + if self.vpc_endpoint_id is not None: + body["vpc_endpoint_id"] = self.vpc_endpoint_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule into a shallow dictionary of its immediate attributes.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.connection_state is not None: + body["connection_state"] = self.connection_state + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.deactivated is not None: + body["deactivated"] = self.deactivated + if self.deactivated_at is not None: + body["deactivated_at"] = self.deactivated_at + if self.domain_names: + body["domain_names"] = self.domain_names + if self.enabled is not None: + body["enabled"] = self.enabled + if self.endpoint_service is not None: + body["endpoint_service"] = self.endpoint_service + if self.network_connectivity_config_id is not None: + 
body["network_connectivity_config_id"] = self.network_connectivity_config_id + if self.resource_names: + body["resource_names"] = self.resource_names + if self.rule_id is not None: + body["rule_id"] = self.rule_id + if self.updated_time is not None: + body["updated_time"] = self.updated_time + if self.vpc_endpoint_id is not None: + body["vpc_endpoint_id"] = self.vpc_endpoint_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule: + """Deserializes the CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule from a dictionary.""" + return cls( + account_id=d.get("account_id", None), + connection_state=_enum( + d, + "connection_state", + CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState, + ), + creation_time=d.get("creation_time", None), + deactivated=d.get("deactivated", None), + deactivated_at=d.get("deactivated_at", None), + domain_names=d.get("domain_names", None), + enabled=d.get("enabled", None), + endpoint_service=d.get("endpoint_service", None), + network_connectivity_config_id=d.get("network_connectivity_config_id", None), + resource_names=d.get("resource_names", None), + rule_id=d.get("rule_id", None), + updated_time=d.get("updated_time", None), + vpc_endpoint_id=d.get("vpc_endpoint_id", None), + ) + + +class CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState(Enum): + + DISCONNECTED = "DISCONNECTED" + ESTABLISHED = "ESTABLISHED" + EXPIRED = "EXPIRED" + PENDING = "PENDING" + REJECTED = "REJECTED" + + +@dataclass +class DashboardEmailSubscriptions: + boolean_val: BooleanMessage + + etag: Optional[str] = None + """etag used for versioning. The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + etag from a GET request, and pass it with the PATCH request to identify the setting version you + are updating.""" + + setting_name: Optional[str] = None + """Name of the corresponding setting. This field is populated in the response, but it will not be + respected even if it's set in the request body. The setting name in the path parameter will be + respected instead. 
Setting name is required to be 'default' if the setting only has one instance + per workspace.""" + + def as_dict(self) -> dict: + """Serializes the DashboardEmailSubscriptions into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.boolean_val: + body["boolean_val"] = self.boolean_val.as_dict() + if self.etag is not None: + body["etag"] = self.etag + if self.setting_name is not None: + body["setting_name"] = self.setting_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DashboardEmailSubscriptions into a shallow dictionary of its immediate attributes.""" + body = {} + if self.boolean_val: + body["boolean_val"] = self.boolean_val + if self.etag is not None: + body["etag"] = self.etag + if self.setting_name is not None: + body["setting_name"] = self.setting_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DashboardEmailSubscriptions: + """Deserializes the DashboardEmailSubscriptions from a dictionary.""" + return cls( + boolean_val=_from_dict(d, "boolean_val", BooleanMessage), + etag=d.get("etag", None), + setting_name=d.get("setting_name", None), + ) + + @dataclass class DefaultNamespaceSetting: """This represents the setting configuration for the default namespace in the Databricks workspace. @@ -1320,6 +1553,38 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteAibiDashboardEmbeddingApprovedDom return cls(etag=d.get("etag", None)) +@dataclass +class DeleteDashboardEmailSubscriptionsResponse: + """The etag is returned.""" + + etag: str + """etag used for versioning. The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> delete pattern to perform setting deletions in order to avoid race conditions. That is, get + an etag from a GET request, and pass it with the DELETE request to identify the rule set version + you are deleting.""" + + def as_dict(self) -> dict: + """Serializes the DeleteDashboardEmailSubscriptionsResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.etag is not None: + body["etag"] = self.etag + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DeleteDashboardEmailSubscriptionsResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.etag is not None: + body["etag"] = self.etag + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DeleteDashboardEmailSubscriptionsResponse: + """Deserializes the DeleteDashboardEmailSubscriptionsResponse from a dictionary.""" + return cls(etag=d.get("etag", None)) + + @dataclass class DeleteDefaultNamespaceSettingResponse: """The etag is returned.""" @@ -1598,6 +1863,38 @@ def from_dict(cls, d: Dict[str, Any]) -> DeleteRestrictWorkspaceAdminsSettingRes return cls(etag=d.get("etag", None)) +@dataclass +class DeleteSqlResultsDownloadResponse: + """The etag is returned.""" + + etag: str + """etag used for versioning. The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> delete pattern to perform setting deletions in order to avoid race conditions. 
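DashboardEmailSubscriptions (and the parallel SqlResultsDownload type later in this file) follow the existing boolean workspace-setting pattern: a BooleanMessage payload, an etag for optimistic concurrency, and delete responses that return the new etag. A hedged sketch, assuming the workspace-level service follows the same get/update shape as the other settings APIs in this module; the accessor name and method signatures are assumptions and are not shown in this hunk:

from databricks.sdk import WorkspaceClient
from databricks.sdk.service.settings import BooleanMessage, DashboardEmailSubscriptions

w = WorkspaceClient()

# Read-then-update so the etag-based concurrency control described in the docstrings is respected.
current = w.settings.dashboard_email_subscriptions.get()
w.settings.dashboard_email_subscriptions.update(
    allow_missing=True,
    setting=DashboardEmailSubscriptions(
        boolean_val=BooleanMessage(value=False),
        etag=current.etag,
    ),
    field_mask="boolean_val.value",
)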
That is, get + an etag from a GET request, and pass it with the DELETE request to identify the rule set version + you are deleting.""" + + def as_dict(self) -> dict: + """Serializes the DeleteSqlResultsDownloadResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.etag is not None: + body["etag"] = self.etag + return body + + def as_shallow_dict(self) -> dict: + """Serializes the DeleteSqlResultsDownloadResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.etag is not None: + body["etag"] = self.etag + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> DeleteSqlResultsDownloadResponse: + """Deserializes the DeleteSqlResultsDownloadResponse from a dictionary.""" + return cls(etag=d.get("etag", None)) + + class DestinationType(Enum): EMAIL = "EMAIL" @@ -3073,43 +3370,6 @@ def from_dict(cls, d: Dict[str, Any]) -> ListIpAccessListResponse: return cls(ip_access_lists=_repeated_dict(d, "ip_access_lists", IpAccessListInfo)) -@dataclass -class ListNccAzurePrivateEndpointRulesResponse: - """The private endpoint rule list was successfully retrieved.""" - - items: Optional[List[NccAzurePrivateEndpointRule]] = None - - next_page_token: Optional[str] = None - """A token that can be used to get the next page of results. If null, there are no more results to - show.""" - - def as_dict(self) -> dict: - """Serializes the ListNccAzurePrivateEndpointRulesResponse into a dictionary suitable for use as a JSON request body.""" - body = {} - if self.items: - body["items"] = [v.as_dict() for v in self.items] - if self.next_page_token is not None: - body["next_page_token"] = self.next_page_token - return body - - def as_shallow_dict(self) -> dict: - """Serializes the ListNccAzurePrivateEndpointRulesResponse into a shallow dictionary of its immediate attributes.""" - body = {} - if self.items: - body["items"] = self.items - if self.next_page_token is not None: - body["next_page_token"] = self.next_page_token - return body - - @classmethod - def from_dict(cls, d: Dict[str, Any]) -> ListNccAzurePrivateEndpointRulesResponse: - """Deserializes the ListNccAzurePrivateEndpointRulesResponse from a dictionary.""" - return cls( - items=_repeated_dict(d, "items", NccAzurePrivateEndpointRule), - next_page_token=d.get("next_page_token", None), - ) - - @dataclass class ListNetworkConnectivityConfigurationsResponse: """The network connectivity configuration list was successfully retrieved.""" @@ -3259,6 +3519,42 @@ def from_dict(cls, d: Dict[str, Any]) -> ListNotificationDestinationsResult: ) +@dataclass +class ListPrivateEndpointRulesResponse: + """The private endpoint rule list was successfully retrieved.""" + + items: Optional[List[NccPrivateEndpointRule]] = None + + next_page_token: Optional[str] = None + """A token that can be used to get the next page of results. 
If null, there are no more results to + show.""" + + def as_dict(self) -> dict: + """Serializes the ListPrivateEndpointRulesResponse into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.items: + body["items"] = [v.as_dict() for v in self.items] + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + def as_shallow_dict(self) -> dict: + """Serializes the ListPrivateEndpointRulesResponse into a shallow dictionary of its immediate attributes.""" + body = {} + if self.items: + body["items"] = self.items + if self.next_page_token is not None: + body["next_page_token"] = self.next_page_token + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> ListPrivateEndpointRulesResponse: + """Deserializes the ListPrivateEndpointRulesResponse from a dictionary.""" + return cls( + items=_repeated_dict(d, "items", NccPrivateEndpointRule), next_page_token=d.get("next_page_token", None) + ) + + @dataclass class ListPublicTokensResponse: token_infos: Optional[List[PublicTokenInfo]] = None @@ -3558,7 +3854,7 @@ class NccAzurePrivateEndpointRule: """Time in epoch milliseconds when this object was deactivated.""" domain_names: Optional[List[str]] = None - """Only used by private endpoints to customer-managed resources. + """Not used by customer-managed private endpoint services. Domain names of target private link service. When updating this field, the full list of target domain_names must be specified.""" @@ -3567,8 +3863,7 @@ class NccAzurePrivateEndpointRule: """The name of the Azure private endpoint resource.""" group_id: Optional[str] = None - """Only used by private endpoints to Azure first-party services. Enum: blob | dfs | sqlServer | - mysqlServer + """Only used by private endpoints to Azure first-party services. The sub-resource type (group ID) of the target resource. 
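ListPrivateEndpointRulesResponse generalizes the removed Azure-only list response: its items are the cloud-agnostic NccPrivateEndpointRule defined further down. A hedged sketch of paging through rules on the account-level network_connectivity service, assuming its existing list_private_endpoint_rules iterator (the NCC ID is a placeholder):

from databricks.sdk import AccountClient

a = AccountClient()

# Iterate every private endpoint rule in a network connectivity configuration;
# the SDK follows next_page_token transparently.
for rule in a.network_connectivity.list_private_endpoint_rules(
    network_connectivity_config_id="0123-4567-89ab"  # placeholder NCC ID
):
    print(rule.rule_id, rule.connection_state)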
Note that to connect to workspace root storage (root DBFS), you need two endpoints, one for blob and one for dfs.""" @@ -3796,11 +4091,16 @@ def from_dict(cls, d: Dict[str, Any]) -> NccEgressDefaultRules: class NccEgressTargetRules: """Target rule controls the egress rules that are dedicated to specific resources.""" + aws_private_endpoint_rules: Optional[List[CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule]] = None + """AWS private endpoint rule controls the AWS private endpoint based egress rules.""" + azure_private_endpoint_rules: Optional[List[NccAzurePrivateEndpointRule]] = None def as_dict(self) -> dict: """Serializes the NccEgressTargetRules into a dictionary suitable for use as a JSON request body.""" body = {} + if self.aws_private_endpoint_rules: + body["aws_private_endpoint_rules"] = [v.as_dict() for v in self.aws_private_endpoint_rules] if self.azure_private_endpoint_rules: body["azure_private_endpoint_rules"] = [v.as_dict() for v in self.azure_private_endpoint_rules] return body @@ -3808,6 +4108,8 @@ def as_dict(self) -> dict: def as_shallow_dict(self) -> dict: """Serializes the NccEgressTargetRules into a shallow dictionary of its immediate attributes.""" body = {} + if self.aws_private_endpoint_rules: + body["aws_private_endpoint_rules"] = self.aws_private_endpoint_rules if self.azure_private_endpoint_rules: body["azure_private_endpoint_rules"] = self.azure_private_endpoint_rules return body @@ -3816,16 +4118,200 @@ def as_shallow_dict(self) -> dict: def from_dict(cls, d: Dict[str, Any]) -> NccEgressTargetRules: """Deserializes the NccEgressTargetRules from a dictionary.""" return cls( - azure_private_endpoint_rules=_repeated_dict(d, "azure_private_endpoint_rules", NccAzurePrivateEndpointRule) + aws_private_endpoint_rules=_repeated_dict( + d, "aws_private_endpoint_rules", CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule + ), + azure_private_endpoint_rules=_repeated_dict(d, "azure_private_endpoint_rules", NccAzurePrivateEndpointRule), ) +@dataclass +class NccPrivateEndpointRule: + """Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure + portal after initialization.""" + + account_id: Optional[str] = None + """Databricks account ID. You can find your account ID from the Accounts Console.""" + + connection_state: Optional[NccPrivateEndpointRulePrivateLinkConnectionState] = None + """The current status of this private endpoint. The private endpoint rules are effective only if + the connection state is ESTABLISHED. Remember that you must approve new endpoints on your + resources in the Cloud console before they take effect. The possible values are: - PENDING: The + endpoint has been created and pending approval. - ESTABLISHED: The endpoint has been approved + and is ready to use in your serverless compute resources. - REJECTED: Connection was rejected by + the private link resource owner. - DISCONNECTED: Connection was removed by the private link + resource owner, the private endpoint becomes informative and should be deleted for clean-up. 
- + EXPIRED: If the endpoint was created but not approved in 14 days, it will be EXPIRED.""" + + creation_time: Optional[int] = None + """Time in epoch milliseconds when this object was created.""" + + deactivated: Optional[bool] = None + """Whether this private endpoint is deactivated.""" + + deactivated_at: Optional[int] = None + """Time in epoch milliseconds when this object was deactivated.""" + + domain_names: Optional[List[str]] = None + """Only used by private endpoints to customer-managed private endpoint services. + + Domain names of target private link service. When updating this field, the full list of target + domain_names must be specified.""" + + enabled: Optional[bool] = None + """Only used by private endpoints towards an AWS S3 service. + + Update this field to activate/deactivate this private endpoint to allow egress access from + serverless compute resources.""" + + endpoint_name: Optional[str] = None + """The name of the Azure private endpoint resource.""" + + endpoint_service: Optional[str] = None + """The full target AWS endpoint service name that connects to the destination resources of the + private endpoint.""" + + group_id: Optional[str] = None + """Not used by customer-managed private endpoint services. + + The sub-resource type (group ID) of the target resource. Note that to connect to workspace root + storage (root DBFS), you need two endpoints, one for blob and one for dfs.""" + + network_connectivity_config_id: Optional[str] = None + """The ID of a network connectivity configuration, which is the parent resource of this private + endpoint rule object.""" + + resource_id: Optional[str] = None + """The Azure resource ID of the target resource.""" + + resource_names: Optional[List[str]] = None + """Only used by private endpoints towards AWS S3 service. + + The globally unique S3 bucket names that will be accessed via the VPC endpoint. The bucket names + must be in the same region as the NCC/endpoint service. When updating this field, we perform + full update on this field. Please ensure a full list of desired resource_names is provided.""" + + rule_id: Optional[str] = None + """The ID of a private endpoint rule.""" + + updated_time: Optional[int] = None + """Time in epoch milliseconds when this object was updated.""" + + vpc_endpoint_id: Optional[str] = None + """The AWS VPC endpoint ID. 
You can use this ID to identify the VPC endpoint created by Databricks.""" + + def as_dict(self) -> dict: + """Serializes the NccPrivateEndpointRule into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.connection_state is not None: + body["connection_state"] = self.connection_state.value + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.deactivated is not None: + body["deactivated"] = self.deactivated + if self.deactivated_at is not None: + body["deactivated_at"] = self.deactivated_at + if self.domain_names: + body["domain_names"] = [v for v in self.domain_names] + if self.enabled is not None: + body["enabled"] = self.enabled + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + if self.endpoint_service is not None: + body["endpoint_service"] = self.endpoint_service + if self.group_id is not None: + body["group_id"] = self.group_id + if self.network_connectivity_config_id is not None: + body["network_connectivity_config_id"] = self.network_connectivity_config_id + if self.resource_id is not None: + body["resource_id"] = self.resource_id + if self.resource_names: + body["resource_names"] = [v for v in self.resource_names] + if self.rule_id is not None: + body["rule_id"] = self.rule_id + if self.updated_time is not None: + body["updated_time"] = self.updated_time + if self.vpc_endpoint_id is not None: + body["vpc_endpoint_id"] = self.vpc_endpoint_id + return body + + def as_shallow_dict(self) -> dict: + """Serializes the NccPrivateEndpointRule into a shallow dictionary of its immediate attributes.""" + body = {} + if self.account_id is not None: + body["account_id"] = self.account_id + if self.connection_state is not None: + body["connection_state"] = self.connection_state + if self.creation_time is not None: + body["creation_time"] = self.creation_time + if self.deactivated is not None: + body["deactivated"] = self.deactivated + if self.deactivated_at is not None: + body["deactivated_at"] = self.deactivated_at + if self.domain_names: + body["domain_names"] = self.domain_names + if self.enabled is not None: + body["enabled"] = self.enabled + if self.endpoint_name is not None: + body["endpoint_name"] = self.endpoint_name + if self.endpoint_service is not None: + body["endpoint_service"] = self.endpoint_service + if self.group_id is not None: + body["group_id"] = self.group_id + if self.network_connectivity_config_id is not None: + body["network_connectivity_config_id"] = self.network_connectivity_config_id + if self.resource_id is not None: + body["resource_id"] = self.resource_id + if self.resource_names: + body["resource_names"] = self.resource_names + if self.rule_id is not None: + body["rule_id"] = self.rule_id + if self.updated_time is not None: + body["updated_time"] = self.updated_time + if self.vpc_endpoint_id is not None: + body["vpc_endpoint_id"] = self.vpc_endpoint_id + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> NccPrivateEndpointRule: + """Deserializes the NccPrivateEndpointRule from a dictionary.""" + return cls( + account_id=d.get("account_id", None), + connection_state=_enum(d, "connection_state", NccPrivateEndpointRulePrivateLinkConnectionState), + creation_time=d.get("creation_time", None), + deactivated=d.get("deactivated", None), + deactivated_at=d.get("deactivated_at", None), + domain_names=d.get("domain_names", None), + enabled=d.get("enabled", None), + 
endpoint_name=d.get("endpoint_name", None), + endpoint_service=d.get("endpoint_service", None), + group_id=d.get("group_id", None), + network_connectivity_config_id=d.get("network_connectivity_config_id", None), + resource_id=d.get("resource_id", None), + resource_names=d.get("resource_names", None), + rule_id=d.get("rule_id", None), + updated_time=d.get("updated_time", None), + vpc_endpoint_id=d.get("vpc_endpoint_id", None), + ) + + +class NccPrivateEndpointRulePrivateLinkConnectionState(Enum): + + DISCONNECTED = "DISCONNECTED" + ESTABLISHED = "ESTABLISHED" + EXPIRED = "EXPIRED" + PENDING = "PENDING" + REJECTED = "REJECTED" + + @dataclass class NetworkConnectivityConfiguration: """Properties of the new network connectivity configuration.""" account_id: Optional[str] = None - """The Databricks account ID that hosts the credential.""" + """Your Databricks account ID. You can find your account ID in your Databricks accounts console.""" creation_time: Optional[int] = None """Time in epoch milliseconds when this object was created.""" @@ -4440,6 +4926,56 @@ def from_dict(cls, d: Dict[str, Any]) -> SlackConfig: return cls(url=d.get("url", None), url_set=d.get("url_set", None)) +@dataclass +class SqlResultsDownload: + boolean_val: BooleanMessage + + etag: Optional[str] = None + """etag used for versioning. The response is at least as fresh as the eTag provided. This is used + for optimistic concurrency control as a way to help prevent simultaneous writes of a setting + overwriting each other. It is strongly suggested that systems make use of the etag in the read + -> update pattern to perform setting updates in order to avoid race conditions. That is, get an + etag from a GET request, and pass it with the PATCH request to identify the setting version you + are updating.""" + + setting_name: Optional[str] = None + """Name of the corresponding setting. This field is populated in the response, but it will not be + respected even if it's set in the request body. The setting name in the path parameter will be + respected instead. Setting name is required to be 'default' if the setting only has one instance + per workspace.""" + + def as_dict(self) -> dict: + """Serializes the SqlResultsDownload into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.boolean_val: + body["boolean_val"] = self.boolean_val.as_dict() + if self.etag is not None: + body["etag"] = self.etag + if self.setting_name is not None: + body["setting_name"] = self.setting_name + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SqlResultsDownload into a shallow dictionary of its immediate attributes.""" + body = {} + if self.boolean_val: + body["boolean_val"] = self.boolean_val + if self.etag is not None: + body["etag"] = self.etag + if self.setting_name is not None: + body["setting_name"] = self.setting_name + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SqlResultsDownload: + """Deserializes the SqlResultsDownload from a dictionary.""" + return cls( + boolean_val=_from_dict(d, "boolean_val", BooleanMessage), + etag=d.get("etag", None), + setting_name=d.get("setting_name", None), + ) + + @dataclass class StringMessage: value: Optional[str] = None @@ -4985,7 +5521,59 @@ class UpdateAutomaticClusterUpdateSettingRequest: allow_missing: bool """This should always be set to true for Settings API. 
Added for AIP compliance.""" - setting: AutomaticClusterUpdateSetting + setting: AutomaticClusterUpdateSetting + + field_mask: str + """The field mask must be a single string, with multiple fields separated by commas (no spaces). + The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, + as only the entire collection field can be specified. Field names must exactly match the + resource field names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the + API changes in the future.""" + + def as_dict(self) -> dict: + """Serializes the UpdateAutomaticClusterUpdateSettingRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: + body["allow_missing"] = self.allow_missing + if self.field_mask is not None: + body["field_mask"] = self.field_mask + if self.setting: + body["setting"] = self.setting.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateAutomaticClusterUpdateSettingRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.allow_missing is not None: + body["allow_missing"] = self.allow_missing + if self.field_mask is not None: + body["field_mask"] = self.field_mask + if self.setting: + body["setting"] = self.setting + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateAutomaticClusterUpdateSettingRequest: + """Deserializes the UpdateAutomaticClusterUpdateSettingRequest from a dictionary.""" + return cls( + allow_missing=d.get("allow_missing", None), + field_mask=d.get("field_mask", None), + setting=_from_dict(d, "setting", AutomaticClusterUpdateSetting), + ) + + +@dataclass +class UpdateComplianceSecurityProfileSettingRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: ComplianceSecurityProfileSetting field_mask: str """The field mask must be a single string, with multiple fields separated by commas (no spaces). 
@@ -4999,7 +5587,7 @@ class UpdateAutomaticClusterUpdateSettingRequest: API changes in the future.""" def as_dict(self) -> dict: - """Serializes the UpdateAutomaticClusterUpdateSettingRequest into a dictionary suitable for use as a JSON request body.""" + """Serializes the UpdateComplianceSecurityProfileSettingRequest into a dictionary suitable for use as a JSON request body.""" body = {} if self.allow_missing is not None: body["allow_missing"] = self.allow_missing @@ -5010,7 +5598,7 @@ def as_dict(self) -> dict: return body def as_shallow_dict(self) -> dict: - """Serializes the UpdateAutomaticClusterUpdateSettingRequest into a shallow dictionary of its immediate attributes.""" + """Serializes the UpdateComplianceSecurityProfileSettingRequest into a shallow dictionary of its immediate attributes.""" body = {} if self.allow_missing is not None: body["allow_missing"] = self.allow_missing @@ -5021,23 +5609,23 @@ def as_shallow_dict(self) -> dict: return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateAutomaticClusterUpdateSettingRequest: - """Deserializes the UpdateAutomaticClusterUpdateSettingRequest from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> UpdateComplianceSecurityProfileSettingRequest: + """Deserializes the UpdateComplianceSecurityProfileSettingRequest from a dictionary.""" return cls( allow_missing=d.get("allow_missing", None), field_mask=d.get("field_mask", None), - setting=_from_dict(d, "setting", AutomaticClusterUpdateSetting), + setting=_from_dict(d, "setting", ComplianceSecurityProfileSetting), ) @dataclass -class UpdateComplianceSecurityProfileSettingRequest: +class UpdateCspEnablementAccountSettingRequest: """Details required to update a setting.""" allow_missing: bool """This should always be set to true for Settings API. Added for AIP compliance.""" - setting: ComplianceSecurityProfileSetting + setting: CspEnablementAccountSetting field_mask: str """The field mask must be a single string, with multiple fields separated by commas (no spaces). 
@@ -5051,7 +5639,7 @@ class UpdateComplianceSecurityProfileSettingRequest: API changes in the future.""" def as_dict(self) -> dict: - """Serializes the UpdateComplianceSecurityProfileSettingRequest into a dictionary suitable for use as a JSON request body.""" + """Serializes the UpdateCspEnablementAccountSettingRequest into a dictionary suitable for use as a JSON request body.""" body = {} if self.allow_missing is not None: body["allow_missing"] = self.allow_missing @@ -5062,7 +5650,7 @@ def as_dict(self) -> dict: return body def as_shallow_dict(self) -> dict: - """Serializes the UpdateComplianceSecurityProfileSettingRequest into a shallow dictionary of its immediate attributes.""" + """Serializes the UpdateCspEnablementAccountSettingRequest into a shallow dictionary of its immediate attributes.""" body = {} if self.allow_missing is not None: body["allow_missing"] = self.allow_missing @@ -5073,23 +5661,23 @@ def as_shallow_dict(self) -> dict: return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateComplianceSecurityProfileSettingRequest: - """Deserializes the UpdateComplianceSecurityProfileSettingRequest from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> UpdateCspEnablementAccountSettingRequest: + """Deserializes the UpdateCspEnablementAccountSettingRequest from a dictionary.""" return cls( allow_missing=d.get("allow_missing", None), field_mask=d.get("field_mask", None), - setting=_from_dict(d, "setting", ComplianceSecurityProfileSetting), + setting=_from_dict(d, "setting", CspEnablementAccountSetting), ) @dataclass -class UpdateCspEnablementAccountSettingRequest: +class UpdateDashboardEmailSubscriptionsRequest: """Details required to update a setting.""" allow_missing: bool """This should always be set to true for Settings API. Added for AIP compliance.""" - setting: CspEnablementAccountSetting + setting: DashboardEmailSubscriptions field_mask: str """The field mask must be a single string, with multiple fields separated by commas (no spaces). 
@@ -5103,7 +5691,7 @@ class UpdateCspEnablementAccountSettingRequest: API changes in the future.""" def as_dict(self) -> dict: - """Serializes the UpdateCspEnablementAccountSettingRequest into a dictionary suitable for use as a JSON request body.""" + """Serializes the UpdateDashboardEmailSubscriptionsRequest into a dictionary suitable for use as a JSON request body.""" body = {} if self.allow_missing is not None: body["allow_missing"] = self.allow_missing @@ -5114,7 +5702,7 @@ def as_dict(self) -> dict: return body def as_shallow_dict(self) -> dict: - """Serializes the UpdateCspEnablementAccountSettingRequest into a shallow dictionary of its immediate attributes.""" + """Serializes the UpdateDashboardEmailSubscriptionsRequest into a shallow dictionary of its immediate attributes.""" body = {} if self.allow_missing is not None: body["allow_missing"] = self.allow_missing @@ -5125,12 +5713,12 @@ def as_shallow_dict(self) -> dict: return body @classmethod - def from_dict(cls, d: Dict[str, Any]) -> UpdateCspEnablementAccountSettingRequest: - """Deserializes the UpdateCspEnablementAccountSettingRequest from a dictionary.""" + def from_dict(cls, d: Dict[str, Any]) -> UpdateDashboardEmailSubscriptionsRequest: + """Deserializes the UpdateDashboardEmailSubscriptionsRequest from a dictionary.""" return cls( allow_missing=d.get("allow_missing", None), field_mask=d.get("field_mask", None), - setting=_from_dict(d, "setting", CspEnablementAccountSetting), + setting=_from_dict(d, "setting", DashboardEmailSubscriptions), ) @@ -5927,16 +6515,33 @@ class UpdatePrivateEndpointRule: portal after initialization.""" domain_names: Optional[List[str]] = None - """Only used by private endpoints to customer-managed resources. + """Only used by private endpoints to customer-managed private endpoint services. Domain names of target private link service. When updating this field, the full list of target domain_names must be specified.""" + enabled: Optional[bool] = None + """Only used by private endpoints towards an AWS S3 service. + + Update this field to activate/deactivate this private endpoint to allow egress access from + serverless compute resources.""" + + resource_names: Optional[List[str]] = None + """Only used by private endpoints towards AWS S3 service. + + The globally unique S3 bucket names that will be accessed via the VPC endpoint. The bucket names + must be in the same region as the NCC/endpoint service. When updating this field, we perform + full update on this field. 
Please ensure a full list of desired resource_names is provided.""" + def as_dict(self) -> dict: """Serializes the UpdatePrivateEndpointRule into a dictionary suitable for use as a JSON request body.""" body = {} if self.domain_names: body["domain_names"] = [v for v in self.domain_names] + if self.enabled is not None: + body["enabled"] = self.enabled + if self.resource_names: + body["resource_names"] = [v for v in self.resource_names] return body def as_shallow_dict(self) -> dict: @@ -5944,12 +6549,20 @@ def as_shallow_dict(self) -> dict: body = {} if self.domain_names: body["domain_names"] = self.domain_names + if self.enabled is not None: + body["enabled"] = self.enabled + if self.resource_names: + body["resource_names"] = self.resource_names return body @classmethod def from_dict(cls, d: Dict[str, Any]) -> UpdatePrivateEndpointRule: """Deserializes the UpdatePrivateEndpointRule from a dictionary.""" - return cls(domain_names=d.get("domain_names", None)) + return cls( + domain_names=d.get("domain_names", None), + enabled=d.get("enabled", None), + resource_names=d.get("resource_names", None), + ) @dataclass @@ -6022,6 +6635,58 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateRestrictWorkspaceAdminsSettingReq ) +@dataclass +class UpdateSqlResultsDownloadRequest: + """Details required to update a setting.""" + + allow_missing: bool + """This should always be set to true for Settings API. Added for AIP compliance.""" + + setting: SqlResultsDownload + + field_mask: str + """The field mask must be a single string, with multiple fields separated by commas (no spaces). + The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields + (e.g., `author.given_name`). Specification of elements in sequence or map fields is not allowed, + as only the entire collection field can be specified. Field names must exactly match the + resource field names. + + A field mask of `*` indicates full replacement. 
It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the + API changes in the future.""" + + def as_dict(self) -> dict: + """Serializes the UpdateSqlResultsDownloadRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.allow_missing is not None: + body["allow_missing"] = self.allow_missing + if self.field_mask is not None: + body["field_mask"] = self.field_mask + if self.setting: + body["setting"] = self.setting.as_dict() + return body + + def as_shallow_dict(self) -> dict: + """Serializes the UpdateSqlResultsDownloadRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.allow_missing is not None: + body["allow_missing"] = self.allow_missing + if self.field_mask is not None: + body["field_mask"] = self.field_mask + if self.setting: + body["setting"] = self.setting + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> UpdateSqlResultsDownloadRequest: + """Deserializes the UpdateSqlResultsDownloadRequest from a dictionary.""" + return cls( + allow_missing=d.get("allow_missing", None), + field_mask=d.get("field_mask", None), + setting=_from_dict(d, "setting", SqlResultsDownload), + ) + + WorkspaceConf = Dict[str, str] @@ -6851,6 +7516,112 @@ def update( return CspEnablementAccountSetting.from_dict(res) +class DashboardEmailSubscriptionsAPI: + """Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can send + subscription emails containing PDFs and/or images of the dashboard. By default, this setting is enabled + (set to `true`)""" + + def __init__(self, api_client): + self._api = api_client + + def delete(self, *, etag: Optional[str] = None) -> DeleteDashboardEmailSubscriptionsResponse: + """Delete the Dashboard Email Subscriptions setting. + + Reverts the Dashboard Email Subscriptions setting to its default value. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteDashboardEmailSubscriptionsResponse` + """ + + query = {} + if etag is not None: + query["etag"] = etag + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "DELETE", + "/api/2.0/settings/types/dashboard_email_subscriptions/names/default", + query=query, + headers=headers, + ) + return DeleteDashboardEmailSubscriptionsResponse.from_dict(res) + + def get(self, *, etag: Optional[str] = None) -> DashboardEmailSubscriptions: + """Get the Dashboard Email Subscriptions setting. + + Gets the Dashboard Email Subscriptions setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. 
That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DashboardEmailSubscriptions` + """ + + query = {} + if etag is not None: + query["etag"] = etag + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", "/api/2.0/settings/types/dashboard_email_subscriptions/names/default", query=query, headers=headers + ) + return DashboardEmailSubscriptions.from_dict(res) + + def update( + self, allow_missing: bool, setting: DashboardEmailSubscriptions, field_mask: str + ) -> DashboardEmailSubscriptions: + """Update the Dashboard Email Subscriptions setting. + + Updates the Dashboard Email Subscriptions setting. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`DashboardEmailSubscriptions` + :param field_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`DashboardEmailSubscriptions` + """ + body = {} + if allow_missing is not None: + body["allow_missing"] = allow_missing + if field_mask is not None: + body["field_mask"] = field_mask + if setting is not None: + body["setting"] = setting.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", "/api/2.0/settings/types/dashboard_email_subscriptions/names/default", body=body, headers=headers + ) + return DashboardEmailSubscriptions.from_dict(res) + + class DefaultNamespaceAPI: """The default namespace setting API allows users to configure the default namespace for a Databricks workspace. @@ -8297,7 +9068,7 @@ def create_network_connectivity_configuration( def create_private_endpoint_rule( self, network_connectivity_config_id: str, private_endpoint_rule: CreatePrivateEndpointRule - ) -> NccAzurePrivateEndpointRule: + ) -> NccPrivateEndpointRule: """Create a private endpoint rule. Create a private endpoint rule for the specified network connectivity config object. Once the object @@ -8316,7 +9087,7 @@ def create_private_endpoint_rule( Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure portal after initialization. - :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` """ body = private_endpoint_rule.as_dict() headers = { @@ -8330,7 +9101,7 @@ def create_private_endpoint_rule( body=body, headers=headers, ) - return NccAzurePrivateEndpointRule.from_dict(res) + return NccPrivateEndpointRule.from_dict(res) def delete_network_connectivity_configuration(self, network_connectivity_config_id: str): """Delete a network connectivity configuration. 
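A minimal usage sketch of the Dashboard Email Subscriptions setting API introduced above. The get/update/delete signatures and the `w.settings.dashboard_email_subscriptions` accessor come from this diff; the `boolean_val` and `etag` fields on `DashboardEmailSubscriptions` and the `field_mask` value are assumptions modeled on the sibling boolean setting `SqlResultsDownload`, not confirmed by this change.

.. code-block:: python

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import settings

    w = WorkspaceClient()

    # Read the current setting; the returned etag supports optimistic concurrency.
    current = w.settings.dashboard_email_subscriptions.get()

    # Disable dashboard subscription emails. The field path in field_mask is illustrative
    # and assumes the setting wraps a BooleanMessage like the other boolean settings.
    updated = w.settings.dashboard_email_subscriptions.update(
        allow_missing=True,
        setting=settings.DashboardEmailSubscriptions(
            boolean_val=settings.BooleanMessage(value=False),
            etag=current.etag,
        ),
        field_mask="boolean_val.value",
    )

    # Revert the workspace to the default behavior.
    w.settings.dashboard_email_subscriptions.delete(etag=updated.etag)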
@@ -8355,7 +9126,7 @@ def delete_network_connectivity_configuration(self, network_connectivity_config_ def delete_private_endpoint_rule( self, network_connectivity_config_id: str, private_endpoint_rule_id: str - ) -> NccAzurePrivateEndpointRule: + ) -> NccPrivateEndpointRule: """Delete a private endpoint rule. Initiates deleting a private endpoint rule. If the connection state is PENDING or EXPIRED, the private @@ -8368,7 +9139,7 @@ def delete_private_endpoint_rule( :param private_endpoint_rule_id: str Your private endpoint rule ID. - :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` """ headers = { @@ -8380,7 +9151,7 @@ def delete_private_endpoint_rule( f"/api/2.0/accounts/{self._api.account_id}/network-connectivity-configs/{network_connectivity_config_id}/private-endpoint-rules/{private_endpoint_rule_id}", headers=headers, ) - return NccAzurePrivateEndpointRule.from_dict(res) + return NccPrivateEndpointRule.from_dict(res) def get_network_connectivity_configuration( self, network_connectivity_config_id: str @@ -8408,7 +9179,7 @@ def get_network_connectivity_configuration( def get_private_endpoint_rule( self, network_connectivity_config_id: str, private_endpoint_rule_id: str - ) -> NccAzurePrivateEndpointRule: + ) -> NccPrivateEndpointRule: """Gets a private endpoint rule. Gets the private endpoint rule. @@ -8418,7 +9189,7 @@ def get_private_endpoint_rule( :param private_endpoint_rule_id: str Your private endpoint rule ID. - :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` """ headers = { @@ -8430,7 +9201,7 @@ def get_private_endpoint_rule( f"/api/2.0/accounts/{self._api.account_id}/network-connectivity-configs/{network_connectivity_config_id}/private-endpoint-rules/{private_endpoint_rule_id}", headers=headers, ) - return NccAzurePrivateEndpointRule.from_dict(res) + return NccPrivateEndpointRule.from_dict(res) def list_network_connectivity_configurations( self, *, page_token: Optional[str] = None @@ -8468,7 +9239,7 @@ def list_network_connectivity_configurations( def list_private_endpoint_rules( self, network_connectivity_config_id: str, *, page_token: Optional[str] = None - ) -> Iterator[NccAzurePrivateEndpointRule]: + ) -> Iterator[NccPrivateEndpointRule]: """List private endpoint rules. Gets an array of private endpoint rules. @@ -8478,7 +9249,7 @@ def list_private_endpoint_rules( :param page_token: str (optional) Pagination token to go to next page based on previous query. - :returns: Iterator over :class:`NccAzurePrivateEndpointRule` + :returns: Iterator over :class:`NccPrivateEndpointRule` """ query = {} @@ -8497,25 +9268,26 @@ def list_private_endpoint_rules( ) if "items" in json: for v in json["items"]: - yield NccAzurePrivateEndpointRule.from_dict(v) + yield NccPrivateEndpointRule.from_dict(v) if "next_page_token" not in json or not json["next_page_token"]: return query["page_token"] = json["next_page_token"] - def update_ncc_azure_private_endpoint_rule_public( + def update_private_endpoint_rule( self, network_connectivity_config_id: str, private_endpoint_rule_id: str, private_endpoint_rule: UpdatePrivateEndpointRule, update_mask: str, - ) -> NccAzurePrivateEndpointRule: + ) -> NccPrivateEndpointRule: """Update a private endpoint rule. Updates a private endpoint rule. Currently only a private endpoint rule to customer-managed resources is allowed to be updated. :param network_connectivity_config_id: str - Your Network Connectivity Configuration ID. 
+ The ID of a network connectivity configuration, which is the parent resource of this private + endpoint rule object. :param private_endpoint_rule_id: str Your private endpoint rule ID. :param private_endpoint_rule: :class:`UpdatePrivateEndpointRule` @@ -8528,7 +9300,7 @@ def update_ncc_azure_private_endpoint_rule_public( the entire collection field can be specified. Field names must exactly match the resource field names. - :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` """ body = private_endpoint_rule.as_dict() query = {} @@ -8546,7 +9318,7 @@ def update_ncc_azure_private_endpoint_rule_public( body=body, headers=headers, ) - return NccAzurePrivateEndpointRule.from_dict(res) + return NccPrivateEndpointRule.from_dict(res) class NetworkPoliciesAPI: @@ -9045,6 +9817,7 @@ def __init__(self, api_client): self._aibi_dashboard_embedding_approved_domains = AibiDashboardEmbeddingApprovedDomainsAPI(self._api) self._automatic_cluster_update = AutomaticClusterUpdateAPI(self._api) self._compliance_security_profile = ComplianceSecurityProfileAPI(self._api) + self._dashboard_email_subscriptions = DashboardEmailSubscriptionsAPI(self._api) self._default_namespace = DefaultNamespaceAPI(self._api) self._disable_legacy_access = DisableLegacyAccessAPI(self._api) self._disable_legacy_dbfs = DisableLegacyDbfsAPI(self._api) @@ -9054,6 +9827,7 @@ def __init__(self, api_client): self._enhanced_security_monitoring = EnhancedSecurityMonitoringAPI(self._api) self._llm_proxy_partner_powered_workspace = LlmProxyPartnerPoweredWorkspaceAPI(self._api) self._restrict_workspace_admins = RestrictWorkspaceAdminsAPI(self._api) + self._sql_results_download = SqlResultsDownloadAPI(self._api) @property def aibi_dashboard_embedding_access_policy(self) -> AibiDashboardEmbeddingAccessPolicyAPI: @@ -9075,6 +9849,11 @@ def compliance_security_profile(self) -> ComplianceSecurityProfileAPI: """Controls whether to enable the compliance security profile for the current workspace.""" return self._compliance_security_profile + @property + def dashboard_email_subscriptions(self) -> DashboardEmailSubscriptionsAPI: + """Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can send subscription emails containing PDFs and/or images of the dashboard.""" + return self._dashboard_email_subscriptions + @property def default_namespace(self) -> DefaultNamespaceAPI: """The default namespace setting API allows users to configure the default namespace for a Databricks workspace.""" @@ -9120,6 +9899,111 @@ def restrict_workspace_admins(self) -> RestrictWorkspaceAdminsAPI: """The Restrict Workspace Admins setting lets you control the capabilities of workspace admins.""" return self._restrict_workspace_admins + @property + def sql_results_download(self) -> SqlResultsDownloadAPI: + """Controls whether users within the workspace are allowed to download results from the SQL Editor and AI/BI Dashboards UIs.""" + return self._sql_results_download + + +class SqlResultsDownloadAPI: + """Controls whether users within the workspace are allowed to download results from the SQL Editor and AI/BI + Dashboards UIs. By default, this setting is enabled (set to `true`)""" + + def __init__(self, api_client): + self._api = api_client + + def delete(self, *, etag: Optional[str] = None) -> DeleteSqlResultsDownloadResponse: + """Delete the SQL Results Download setting. + + Reverts the SQL Results Download setting to its default value. + + :param etag: str (optional) + etag used for versioning. 
The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteSqlResultsDownloadResponse` + """ + + query = {} + if etag is not None: + query["etag"] = etag + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "DELETE", "/api/2.0/settings/types/sql_results_download/names/default", query=query, headers=headers + ) + return DeleteSqlResultsDownloadResponse.from_dict(res) + + def get(self, *, etag: Optional[str] = None) -> SqlResultsDownload: + """Get the SQL Results Download setting. + + Gets the SQL Results Download setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`SqlResultsDownload` + """ + + query = {} + if etag is not None: + query["etag"] = etag + headers = { + "Accept": "application/json", + } + + res = self._api.do( + "GET", "/api/2.0/settings/types/sql_results_download/names/default", query=query, headers=headers + ) + return SqlResultsDownload.from_dict(res) + + def update(self, allow_missing: bool, setting: SqlResultsDownload, field_mask: str) -> SqlResultsDownload: + """Update the SQL Results Download setting. + + Updates the SQL Results Download setting. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`SqlResultsDownload` + :param field_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`SqlResultsDownload` + """ + body = {} + if allow_missing is not None: + body["allow_missing"] = allow_missing + if field_mask is not None: + body["field_mask"] = field_mask + if setting is not None: + body["setting"] = setting.as_dict() + headers = { + "Accept": "application/json", + "Content-Type": "application/json", + } + + res = self._api.do( + "PATCH", "/api/2.0/settings/types/sql_results_download/names/default", body=body, headers=headers + ) + return SqlResultsDownload.from_dict(res) + class TokenManagementAPI: """Enables administrators to get all tokens and delete tokens for other users. 
Admins can either get every @@ -9414,20 +10298,20 @@ def set_status(self, contents: Dict[str, str]): class WorkspaceNetworkConfigurationAPI: - """These APIs allow configuration of network settings for Databricks workspaces. Each workspace is always - associated with exactly one network policy that controls which network destinations can be accessed from - the Databricks environment. By default, workspaces are associated with the 'default-policy' network - policy. You cannot create or delete a workspace's network configuration, only update it to associate the - workspace with a different policy.""" + """These APIs allow configuration of network settings for Databricks workspaces by selecting which network + policy to associate with the workspace. Each workspace is always associated with exactly one network + policy that controls which network destinations can be accessed from the Databricks environment. By + default, workspaces are associated with the 'default-policy' network policy. You cannot create or delete a + workspace's network option, only update it to associate the workspace with a different policy""" def __init__(self, api_client): self._api = api_client def get_workspace_network_option_rpc(self, workspace_id: int) -> WorkspaceNetworkOption: - """Get workspace network configuration. + """Get workspace network option. - Gets the network configuration for a workspace. Every workspace has exactly one network policy - binding, with 'default-policy' used if no explicit assignment exists. + Gets the network option for a workspace. Every workspace has exactly one network policy binding, with + 'default-policy' used if no explicit assignment exists. :param workspace_id: int The workspace ID. @@ -9447,11 +10331,10 @@ def get_workspace_network_option_rpc(self, workspace_id: int) -> WorkspaceNetwor def update_workspace_network_option_rpc( self, workspace_id: int, workspace_network_option: WorkspaceNetworkOption ) -> WorkspaceNetworkOption: - """Update workspace network configuration. + """Update workspace network option. - Updates the network configuration for a workspace. This operation associates the workspace with the - specified network policy. To revert to the default policy, specify 'default-policy' as the - network_policy_id. + Updates the network option for a workspace. This operation associates the workspace with the specified + network policy. To revert to the default policy, specify 'default-policy' as the network_policy_id. :param workspace_id: int The workspace ID. diff --git a/databricks/sdk/service/sharing.py b/databricks/sdk/service/sharing.py index 09bf080f5..f72682702 100755 --- a/databricks/sdk/service/sharing.py +++ b/databricks/sdk/service/sharing.py @@ -22,6 +22,7 @@ class AuthenticationType(Enum): DATABRICKS = "DATABRICKS" OAUTH_CLIENT_CREDENTIALS = "OAUTH_CLIENT_CREDENTIALS" + OIDC_FEDERATION = "OIDC_FEDERATION" TOKEN = "TOKEN" @@ -2763,7 +2764,7 @@ def from_dict(cls, d: Dict[str, Any]) -> UpdateShare: @dataclass class UpdateSharePermissions: changes: Optional[List[PermissionsChange]] = None - """Array of permission changes.""" + """Array of permissions change objects.""" name: Optional[str] = None """The name of the share.""" @@ -3999,7 +4000,7 @@ def update_permissions( :param name: str The name of the share. :param changes: List[:class:`PermissionsChange`] (optional) - Array of permission changes. + Array of permissions change objects. :param omit_permissions_list: bool (optional) Optional. Whether to return the latest permissions list of the share in the response. 
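A minimal sketch of the renamed account-level method shown above: `update_private_endpoint_rule()` replaces `update_ncc_azure_private_endpoint_rule_public()` and accepts the new `UpdatePrivateEndpointRule` fields for AWS S3 endpoints. The network connectivity configuration ID, rule ID, and bucket names below are placeholders.

.. code-block:: python

    from databricks.sdk import AccountClient
    from databricks.sdk.service import settings

    a = AccountClient()

    # Activate an S3 private endpoint rule and set the bucket list. Per the field docs,
    # resource_names is fully replaced on update, so every desired bucket must be listed.
    rule = a.network_connectivity.update_private_endpoint_rule(
        network_connectivity_config_id="<network-connectivity-config-id>",
        private_endpoint_rule_id="<private-endpoint-rule-id>",
        private_endpoint_rule=settings.UpdatePrivateEndpointRule(
            enabled=True,
            resource_names=["example-bucket-1", "example-bucket-2"],
        ),
        update_mask="enabled,resource_names",
    )

    # The call now returns the generalized NccPrivateEndpointRule type.
    print(rule.rule_id, rule.vpc_endpoint_id)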
diff --git a/databricks/sdk/service/sql.py b/databricks/sdk/service/sql.py index 0cef4c2e4..0e23b7a47 100755 --- a/databricks/sdk/service/sql.py +++ b/databricks/sdk/service/sql.py @@ -1696,6 +1696,68 @@ def from_dict(cls, d: Dict[str, Any]) -> CreateQueryRequestQuery: ) +@dataclass +class CreateQueryVisualizationsLegacyRequest: + """Add visualization to a query""" + + query_id: str + """The identifier returned by :method:queries/create""" + + type: str + """The type of visualization: chart, table, pivot table, and so on.""" + + options: Any + """The options object varies widely from one visualization type to the next and is unsupported. + Databricks does not recommend modifying visualization settings in JSON.""" + + description: Optional[str] = None + """A short description of this visualization. This is not displayed in the UI.""" + + name: Optional[str] = None + """The name of the visualization that appears on dashboards and the query screen.""" + + def as_dict(self) -> dict: + """Serializes the CreateQueryVisualizationsLegacyRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.description is not None: + body["description"] = self.description + if self.name is not None: + body["name"] = self.name + if self.options: + body["options"] = self.options + if self.query_id is not None: + body["query_id"] = self.query_id + if self.type is not None: + body["type"] = self.type + return body + + def as_shallow_dict(self) -> dict: + """Serializes the CreateQueryVisualizationsLegacyRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.description is not None: + body["description"] = self.description + if self.name is not None: + body["name"] = self.name + if self.options: + body["options"] = self.options + if self.query_id is not None: + body["query_id"] = self.query_id + if self.type is not None: + body["type"] = self.type + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> CreateQueryVisualizationsLegacyRequest: + """Deserializes the CreateQueryVisualizationsLegacyRequest from a dictionary.""" + return cls( + description=d.get("description", None), + name=d.get("name", None), + options=d.get("options", None), + query_id=d.get("query_id", None), + type=d.get("type", None), + ) + + @dataclass class CreateVisualizationRequest: visualization: Optional[CreateVisualizationRequestVisualization] = None @@ -6047,6 +6109,10 @@ class QueryMetrics: spill_to_disk_bytes: Optional[int] = None """Size of data temporarily written to disk while executing the query, in bytes.""" + task_time_over_time_range: Optional[TaskTimeOverRange] = None + """sum of task times completed in a range of wall clock time, approximated to a configurable number + of points aggregated over all stages and jobs in the query (based on task_total_time_ms)""" + task_total_time_ms: Optional[int] = None """Sum of execution time for all of the query’s tasks, in milliseconds.""" @@ -6097,6 +6163,8 @@ def as_dict(self) -> dict: body["rows_read_count"] = self.rows_read_count if self.spill_to_disk_bytes is not None: body["spill_to_disk_bytes"] = self.spill_to_disk_bytes + if self.task_time_over_time_range: + body["task_time_over_time_range"] = self.task_time_over_time_range.as_dict() if self.task_total_time_ms is not None: body["task_total_time_ms"] = self.task_total_time_ms if self.total_time_ms is not None: @@ -6146,6 +6214,8 @@ def as_shallow_dict(self) -> dict: body["rows_read_count"] = self.rows_read_count if self.spill_to_disk_bytes is not None: 
body["spill_to_disk_bytes"] = self.spill_to_disk_bytes + if self.task_time_over_time_range: + body["task_time_over_time_range"] = self.task_time_over_time_range if self.task_total_time_ms is not None: body["task_total_time_ms"] = self.task_total_time_ms if self.total_time_ms is not None: @@ -6177,6 +6247,7 @@ def from_dict(cls, d: Dict[str, Any]) -> QueryMetrics: rows_produced_count=d.get("rows_produced_count", None), rows_read_count=d.get("rows_read_count", None), spill_to_disk_bytes=d.get("spill_to_disk_bytes", None), + task_time_over_time_range=_from_dict(d, "task_time_over_time_range", TaskTimeOverRange), task_total_time_ms=d.get("task_total_time_ms", None), total_time_ms=d.get("total_time_ms", None), write_remote_bytes=d.get("write_remote_bytes", None), @@ -6763,6 +6834,50 @@ class ServiceErrorCode(Enum): WORKSPACE_TEMPORARILY_UNAVAILABLE = "WORKSPACE_TEMPORARILY_UNAVAILABLE" +@dataclass +class SetRequest: + """Set object ACL""" + + access_control_list: Optional[List[AccessControl]] = None + + object_id: Optional[str] = None + """Object ID. The ACL for the object with this UUID is overwritten by this request's POST content.""" + + object_type: Optional[ObjectTypePlural] = None + """The type of object permission to set.""" + + def as_dict(self) -> dict: + """Serializes the SetRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.access_control_list: + body["access_control_list"] = [v.as_dict() for v in self.access_control_list] + if self.object_id is not None: + body["objectId"] = self.object_id + if self.object_type is not None: + body["objectType"] = self.object_type.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the SetRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.access_control_list: + body["access_control_list"] = self.access_control_list + if self.object_id is not None: + body["objectId"] = self.object_id + if self.object_type is not None: + body["objectType"] = self.object_type + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> SetRequest: + """Deserializes the SetRequest from a dictionary.""" + return cls( + access_control_list=_repeated_dict(d, "access_control_list", AccessControl), + object_id=d.get("objectId", None), + object_type=_enum(d, "objectType", ObjectTypePlural), + ) + + @dataclass class SetResponse: access_control_list: Optional[List[AccessControl]] = None @@ -7167,6 +7282,63 @@ class SuccessMessage(Enum): SUCCESS = "Success" +@dataclass +class TaskTimeOverRange: + entries: Optional[List[TaskTimeOverRangeEntry]] = None + + interval: Optional[int] = None + """interval length for all entries (difference in start time and end time of an entry range) the + same for all entries start time of first interval is query_start_time_ms""" + + def as_dict(self) -> dict: + """Serializes the TaskTimeOverRange into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.entries: + body["entries"] = [v.as_dict() for v in self.entries] + if self.interval is not None: + body["interval"] = self.interval + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TaskTimeOverRange into a shallow dictionary of its immediate attributes.""" + body = {} + if self.entries: + body["entries"] = self.entries + if self.interval is not None: + body["interval"] = self.interval + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TaskTimeOverRange: + """Deserializes the TaskTimeOverRange from a dictionary.""" + 
return cls(entries=_repeated_dict(d, "entries", TaskTimeOverRangeEntry), interval=d.get("interval", None)) + + +@dataclass +class TaskTimeOverRangeEntry: + task_completed_time_ms: Optional[int] = None + """total task completion time in this time range, aggregated over all stages and jobs in the query""" + + def as_dict(self) -> dict: + """Serializes the TaskTimeOverRangeEntry into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.task_completed_time_ms is not None: + body["task_completed_time_ms"] = self.task_completed_time_ms + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TaskTimeOverRangeEntry into a shallow dictionary of its immediate attributes.""" + body = {} + if self.task_completed_time_ms is not None: + body["task_completed_time_ms"] = self.task_completed_time_ms + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TaskTimeOverRangeEntry: + """Deserializes the TaskTimeOverRangeEntry from a dictionary.""" + return cls(task_completed_time_ms=d.get("task_completed_time_ms", None)) + + @dataclass class TerminationReason: code: Optional[TerminationReasonCode] = None @@ -7384,6 +7556,51 @@ def from_dict(cls, d: Dict[str, Any]) -> TransferOwnershipObjectId: return cls(new_owner=d.get("new_owner", None)) +@dataclass +class TransferOwnershipRequest: + """Transfer object ownership""" + + new_owner: Optional[str] = None + """Email address for the new owner, who must exist in the workspace.""" + + object_id: Optional[TransferOwnershipObjectId] = None + """The ID of the object on which to change ownership.""" + + object_type: Optional[OwnableObjectType] = None + """The type of object on which to change ownership.""" + + def as_dict(self) -> dict: + """Serializes the TransferOwnershipRequest into a dictionary suitable for use as a JSON request body.""" + body = {} + if self.new_owner is not None: + body["new_owner"] = self.new_owner + if self.object_id: + body["objectId"] = self.object_id.as_dict() + if self.object_type is not None: + body["objectType"] = self.object_type.value + return body + + def as_shallow_dict(self) -> dict: + """Serializes the TransferOwnershipRequest into a shallow dictionary of its immediate attributes.""" + body = {} + if self.new_owner is not None: + body["new_owner"] = self.new_owner + if self.object_id: + body["objectId"] = self.object_id + if self.object_type is not None: + body["objectType"] = self.object_type + return body + + @classmethod + def from_dict(cls, d: Dict[str, Any]) -> TransferOwnershipRequest: + """Deserializes the TransferOwnershipRequest from a dictionary.""" + return cls( + new_owner=d.get("new_owner", None), + object_id=_from_dict(d, "objectId", TransferOwnershipObjectId), + object_type=_enum(d, "objectType", OwnableObjectType), + ) + + @dataclass class UpdateAlertRequest: update_mask: str @@ -8760,7 +8977,7 @@ def update(self, alert_id: str, name: str, options: AlertOptions, query_id: str, class AlertsV2API: - """TODO: Add description""" + """New version of SQL Alerts""" def __init__(self, api_client): self._api = api_client diff --git a/docs/account/billing/log_delivery.rst b/docs/account/billing/log_delivery.rst index ec5e0a945..4f3baef60 100644 --- a/docs/account/billing/log_delivery.rst +++ b/docs/account/billing/log_delivery.rst @@ -4,54 +4,10 @@ .. py:class:: LogDeliveryAPI - These APIs manage log delivery configurations for this account. The two supported log types for this API - are _billable usage logs_ and _audit logs_. This feature is in Public Preview. 
This feature works with all - account ID types. - - Log delivery works with all account types. However, if your account is on the E2 version of the platform - or on a select custom plan that allows multiple workspaces per account, you can optionally configure - different storage destinations for each workspace. Log delivery status is also provided to know the latest - status of log delivery attempts. The high-level flow of billable usage delivery: - - 1. **Create storage**: In AWS, [create a new AWS S3 bucket] with a specific bucket policy. Using - Databricks APIs, call the Account API to create a [storage configuration object](:method:Storage/Create) - that uses the bucket name. 2. **Create credentials**: In AWS, create the appropriate AWS IAM role. For - full details, including the required IAM role policies and trust relationship, see [Billable usage log - delivery]. Using Databricks APIs, call the Account API to create a [credential configuration - object](:method:Credentials/Create) that uses the IAM role"s ARN. 3. **Create log delivery - configuration**: Using Databricks APIs, call the Account API to [create a log delivery - configuration](:method:LogDelivery/Create) that uses the credential and storage configuration objects from - previous steps. You can specify if the logs should include all events of that log type in your account - (_Account level_ delivery) or only events for a specific set of workspaces (_workspace level_ delivery). - Account level log delivery applies to all current and future workspaces plus account level logs, while - workspace level log delivery solely delivers logs related to the specified workspaces. You can create - multiple types of delivery configurations per account. - - For billable usage delivery: * For more information about billable usage logs, see [Billable usage log - delivery]. For the CSV schema, see the [Usage page]. * The delivery location is - `//billable-usage/csv/`, where `` is the name of the optional delivery path - prefix you set up during log delivery configuration. Files are named - `workspaceId=-usageMonth=.csv`. * All billable usage logs apply to specific - workspaces (_workspace level_ logs). You can aggregate usage for your entire account by creating an - _account level_ delivery configuration that delivers logs for all current and future workspaces in your - account. * The files are delivered daily by overwriting the month's CSV file for each workspace. - - For audit log delivery: * For more information about about audit log delivery, see [Audit log delivery], - which includes information about the used JSON schema. * The delivery location is - `//workspaceId=/date=/auditlogs_.json`. - Files may get overwritten with the same content multiple times to achieve exactly-once delivery. * If the - audit log delivery configuration included specific workspace IDs, only _workspace-level_ audit logs for - those workspaces are delivered. If the log delivery configuration applies to the entire account (_account - level_ delivery configuration), the audit log delivery includes workspace-level audit logs for all - workspaces in the account as well as account-level audit logs. See [Audit log delivery] for details. * - Auditable events are typically available in logs within 15 minutes. 
- - [Audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html - [Billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - [Usage page]: https://docs.databricks.com/administration-guide/account-settings/usage.html - [create a new AWS S3 bucket]: https://docs.databricks.com/administration-guide/account-api/aws-storage.html - - .. py:method:: create( [, log_delivery_configuration: Optional[CreateLogDeliveryConfigurationParams]]) -> WrappedLogDeliveryConfiguration + These APIs manage Log delivery configurations for this account. Log delivery configs enable you to + configure the delivery of the specified type of logs to your storage account. + + .. py:method:: create(log_delivery_configuration: CreateLogDeliveryConfigurationParams) -> WrappedLogDeliveryConfiguration Usage: @@ -119,12 +75,13 @@ [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [Deliver and access billable usage logs]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html - :param log_delivery_configuration: :class:`CreateLogDeliveryConfigurationParams` (optional) + :param log_delivery_configuration: :class:`CreateLogDeliveryConfigurationParams` + * Log Delivery Configuration :returns: :class:`WrappedLogDeliveryConfiguration` - .. py:method:: get(log_delivery_configuration_id: str) -> WrappedLogDeliveryConfiguration + .. py:method:: get(log_delivery_configuration_id: str) -> GetLogDeliveryConfigurationResponse Usage: @@ -176,12 +133,12 @@ Gets a Databricks log delivery configuration object for an account, both specified by ID. :param log_delivery_configuration_id: str - Databricks log delivery configuration ID + The log delivery configuration id of customer - :returns: :class:`WrappedLogDeliveryConfiguration` + :returns: :class:`GetLogDeliveryConfigurationResponse` - .. py:method:: list( [, credentials_id: Optional[str], status: Optional[LogDeliveryConfigStatus], storage_configuration_id: Optional[str]]) -> Iterator[LogDeliveryConfiguration] + .. py:method:: list( [, credentials_id: Optional[str], page_token: Optional[str], status: Optional[LogDeliveryConfigStatus], storage_configuration_id: Optional[str]]) -> Iterator[LogDeliveryConfiguration] Usage: @@ -200,11 +157,14 @@ Gets all Databricks log delivery configurations associated with an account specified by ID. :param credentials_id: str (optional) - Filter by credential configuration ID. + The Credentials id to filter the search results with + :param page_token: str (optional) + A page token received from a previous get all budget configurations call. This token can be used to + retrieve the subsequent page. Requests first page if absent. :param status: :class:`LogDeliveryConfigStatus` (optional) - Filter by status `ENABLED` or `DISABLED`. + The log delivery status to filter the search results with :param storage_configuration_id: str (optional) - Filter by storage configuration ID. + The Storage Configuration id to filter the search results with :returns: Iterator over :class:`LogDeliveryConfiguration` @@ -219,7 +179,7 @@ under [Create log delivery](:method:LogDelivery/Create). :param log_delivery_configuration_id: str - Databricks log delivery configuration ID + The log delivery configuration id of customer :param status: :class:`LogDeliveryConfigStatus` Status of log delivery configuration. Set to `ENABLED` (enabled) or `DISABLED` (disabled). 
Defaults to `ENABLED`. You can [enable or disable the diff --git a/docs/account/iam/service_principals.rst b/docs/account/iam/service_principals.rst index e0fd8577a..302cf5f79 100644 --- a/docs/account/iam/service_principals.rst +++ b/docs/account/iam/service_principals.rst @@ -23,10 +23,7 @@ a = AccountClient() - sp_create = a.service_principals.create(active=True, display_name=f"sdk-{time.time_ns()}") - - # cleanup - a.service_principals.delete(id=sp_create.id) + spn = a.service_principals.create(display_name=f"sdk-{time.time_ns()}") Create a service principal. diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 33df20178..745bd75da 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -80,9 +80,9 @@ spn_id = spn.id - workspace_id = os.environ["DUMMY_WORKSPACE_ID"] + workspace_id = os.environ["TEST_WORKSPACE_ID"] - _ = a.workspace_assignment.update( + a.workspace_assignment.update( workspace_id=workspace_id, principal_id=spn_id, permissions=[iam.WorkspacePermission.USER], diff --git a/docs/account/provisioning/credentials.rst b/docs/account/provisioning/credentials.rst index d023d4f1f..e307588f1 100644 --- a/docs/account/provisioning/credentials.rst +++ b/docs/account/provisioning/credentials.rst @@ -24,15 +24,15 @@ a = AccountClient() - role = a.credentials.create( + creds = a.credentials.create( credentials_name=f"sdk-{time.time_ns()}", aws_credentials=provisioning.CreateCredentialAwsCredentials( - sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_CROSSACCOUNT_ARN"]) + sts_role=provisioning.CreateCredentialStsRole(role_arn=os.environ["TEST_LOGDELIVERY_ARN"]) ), ) # cleanup - a.credentials.delete(credentials_id=role.credentials_id) + a.credentials.delete(credentials_id=creds.credentials_id) Create credential configuration. diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index b8e144f8c..010795885 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -16,7 +16,6 @@ .. code-block:: - import os import time from databricks.sdk import AccountClient @@ -24,13 +23,13 @@ a = AccountClient() - storage = a.storage.create( + bucket = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), ) # cleanup - a.storage.delete(storage_configuration_id=storage.storage_configuration_id) + a.storage.delete(storage_configuration_id=bucket.storage_configuration_id) Create new storage configuration. diff --git a/docs/account/settings/network_connectivity.rst b/docs/account/settings/network_connectivity.rst index 2ed5c167a..8b3a9d704 100644 --- a/docs/account/settings/network_connectivity.rst +++ b/docs/account/settings/network_connectivity.rst @@ -35,7 +35,7 @@ :returns: :class:`NetworkConnectivityConfiguration` - .. py:method:: create_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule: CreatePrivateEndpointRule) -> NccAzurePrivateEndpointRule + .. py:method:: create_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule: CreatePrivateEndpointRule) -> NccPrivateEndpointRule Create a private endpoint rule. @@ -55,7 +55,7 @@ Properties of the new private endpoint rule. Note that you must approve the endpoint in Azure portal after initialization. 
- :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` .. py:method:: delete_network_connectivity_configuration(network_connectivity_config_id: str) @@ -70,7 +70,7 @@ - .. py:method:: delete_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str) -> NccAzurePrivateEndpointRule + .. py:method:: delete_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str) -> NccPrivateEndpointRule Delete a private endpoint rule. @@ -84,7 +84,7 @@ :param private_endpoint_rule_id: str Your private endpoint rule ID. - :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` .. py:method:: get_network_connectivity_configuration(network_connectivity_config_id: str) -> NetworkConnectivityConfiguration @@ -99,7 +99,7 @@ :returns: :class:`NetworkConnectivityConfiguration` - .. py:method:: get_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str) -> NccAzurePrivateEndpointRule + .. py:method:: get_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str) -> NccPrivateEndpointRule Gets a private endpoint rule. @@ -110,7 +110,7 @@ :param private_endpoint_rule_id: str Your private endpoint rule ID. - :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` .. py:method:: list_network_connectivity_configurations( [, page_token: Optional[str]]) -> Iterator[NetworkConnectivityConfiguration] @@ -125,7 +125,7 @@ :returns: Iterator over :class:`NetworkConnectivityConfiguration` - .. py:method:: list_private_endpoint_rules(network_connectivity_config_id: str [, page_token: Optional[str]]) -> Iterator[NccAzurePrivateEndpointRule] + .. py:method:: list_private_endpoint_rules(network_connectivity_config_id: str [, page_token: Optional[str]]) -> Iterator[NccPrivateEndpointRule] List private endpoint rules. @@ -136,10 +136,10 @@ :param page_token: str (optional) Pagination token to go to next page based on previous query. - :returns: Iterator over :class:`NccAzurePrivateEndpointRule` + :returns: Iterator over :class:`NccPrivateEndpointRule` - .. py:method:: update_ncc_azure_private_endpoint_rule_public(network_connectivity_config_id: str, private_endpoint_rule_id: str, private_endpoint_rule: UpdatePrivateEndpointRule, update_mask: str) -> NccAzurePrivateEndpointRule + .. py:method:: update_private_endpoint_rule(network_connectivity_config_id: str, private_endpoint_rule_id: str, private_endpoint_rule: UpdatePrivateEndpointRule, update_mask: str) -> NccPrivateEndpointRule Update a private endpoint rule. @@ -147,7 +147,8 @@ is allowed to be updated. :param network_connectivity_config_id: str - Your Network Connectivity Configuration ID. + The ID of a network connectivity configuration, which is the parent resource of this private + endpoint rule object. :param private_endpoint_rule_id: str Your private endpoint rule ID. :param private_endpoint_rule: :class:`UpdatePrivateEndpointRule` @@ -160,5 +161,5 @@ the entire collection field can be specified. Field names must exactly match the resource field names. 
- :returns: :class:`NccAzurePrivateEndpointRule` + :returns: :class:`NccPrivateEndpointRule` \ No newline at end of file diff --git a/docs/account/settings/workspace_network_configuration.rst b/docs/account/settings/workspace_network_configuration.rst index 98ff84202..3ed40313f 100644 --- a/docs/account/settings/workspace_network_configuration.rst +++ b/docs/account/settings/workspace_network_configuration.rst @@ -1,21 +1,21 @@ -``a.workspace_network_configuration``: Workspace Network Configuration -====================================================================== +``a.workspace_network_configuration``: Workspace Network Option +=============================================================== .. currentmodule:: databricks.sdk.service.settings .. py:class:: WorkspaceNetworkConfigurationAPI - These APIs allow configuration of network settings for Databricks workspaces. Each workspace is always - associated with exactly one network policy that controls which network destinations can be accessed from - the Databricks environment. By default, workspaces are associated with the 'default-policy' network - policy. You cannot create or delete a workspace's network configuration, only update it to associate the - workspace with a different policy. + These APIs allow configuration of network settings for Databricks workspaces by selecting which network + policy to associate with the workspace. Each workspace is always associated with exactly one network + policy that controls which network destinations can be accessed from the Databricks environment. By + default, workspaces are associated with the 'default-policy' network policy. You cannot create or delete a + workspace's network option, only update it to associate the workspace with a different policy .. py:method:: get_workspace_network_option_rpc(workspace_id: int) -> WorkspaceNetworkOption - Get workspace network configuration. + Get workspace network option. - Gets the network configuration for a workspace. Every workspace has exactly one network policy - binding, with 'default-policy' used if no explicit assignment exists. + Gets the network option for a workspace. Every workspace has exactly one network policy binding, with + 'default-policy' used if no explicit assignment exists. :param workspace_id: int The workspace ID. @@ -25,11 +25,10 @@ .. py:method:: update_workspace_network_option_rpc(workspace_id: int, workspace_network_option: WorkspaceNetworkOption) -> WorkspaceNetworkOption - Update workspace network configuration. + Update workspace network option. - Updates the network configuration for a workspace. This operation associates the workspace with the - specified network policy. To revert to the default policy, specify 'default-policy' as the - network_policy_id. + Updates the network option for a workspace. This operation associates the workspace with the specified + network policy. To revert to the default policy, specify 'default-policy' as the network_policy_id. :param workspace_id: int The workspace ID. diff --git a/docs/dbdataclasses/aibuilder.rst b/docs/dbdataclasses/aibuilder.rst new file mode 100644 index 000000000..cb5400647 --- /dev/null +++ b/docs/dbdataclasses/aibuilder.rst @@ -0,0 +1,55 @@ +AI Builder +========== + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.aibuilder`` module. + +.. py:currentmodule:: databricks.sdk.service.aibuilder +.. autoclass:: CancelCustomLlmOptimizationRunRequest + :members: + :undoc-members: + +.. 
autoclass:: CancelResponse + :members: + :undoc-members: + +.. autoclass:: CustomLlm + :members: + :undoc-members: + +.. autoclass:: Dataset + :members: + :undoc-members: + +.. autoclass:: StartCustomLlmOptimizationRunRequest + :members: + :undoc-members: + +.. py:class:: State + + States of Custom LLM optimization lifecycle. + + .. py:attribute:: CANCELLED + :value: "CANCELLED" + + .. py:attribute:: COMPLETED + :value: "COMPLETED" + + .. py:attribute:: CREATED + :value: "CREATED" + + .. py:attribute:: FAILED + :value: "FAILED" + + .. py:attribute:: PENDING + :value: "PENDING" + + .. py:attribute:: RUNNING + :value: "RUNNING" + +.. autoclass:: Table + :members: + :undoc-members: + +.. autoclass:: UpdateCustomLlmRequest + :members: + :undoc-members: diff --git a/docs/dbdataclasses/billing.rst b/docs/dbdataclasses/billing.rst index 590fd693e..ca8408bdf 100644 --- a/docs/dbdataclasses/billing.rst +++ b/docs/dbdataclasses/billing.rst @@ -107,7 +107,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: DeliveryStatus - The status string for log delivery. Possible values are: * `CREATED`: There were no log delivery attempts since the config was created. * `SUCCEEDED`: The latest attempt of log delivery has succeeded completely. * `USER_FAILURE`: The latest attempt of log delivery failed because of misconfiguration of customer provided permissions on role or storage. * `SYSTEM_FAILURE`: The latest attempt of log delivery failed because of an Databricks internal error. Contact support if it doesn't go away soon. * `NOT_FOUND`: The log delivery status as the configuration has been disabled since the release of this feature or there are no workspaces in the account. + * The status string for log delivery. Possible values are: `CREATED`: There were no log delivery attempts since the config was created. `SUCCEEDED`: The latest attempt of log delivery has succeeded completely. `USER_FAILURE`: The latest attempt of log delivery failed because of misconfiguration of customer provided permissions on role or storage. `SYSTEM_FAILURE`: The latest attempt of log delivery failed because of an Databricks internal error. Contact support if it doesn't go away soon. `NOT_FOUND`: The log delivery status as the configuration has been disabled since the release of this feature or there are no workspaces in the account. .. py:attribute:: CREATED :value: "CREATED" @@ -140,6 +140,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GetLogDeliveryConfigurationResponse + :members: + :undoc-members: + .. autoclass:: LimitConfig :members: :undoc-members: @@ -154,7 +158,8 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: LogDeliveryConfigStatus - Status of log delivery configuration. Set to `ENABLED` (enabled) or `DISABLED` (disabled). Defaults to `ENABLED`. You can [enable or disable the configuration](#operation/patch-log-delivery-config-status) later. Deletion of a configuration is not supported, so disable a log delivery configuration that is no longer needed. + * Log Delivery Status + `ENABLED`: All dependencies have executed and succeeded `DISABLED`: At least one dependency has succeeded .. py:attribute:: DISABLED :value: "DISABLED" @@ -172,10 +177,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: LogType - Log delivery type. 
Supported values are: - * `BILLABLE_USAGE` — Configure [billable usage log delivery]. For the CSV schema, see the [View billable usage]. - * `AUDIT_LOGS` — Configure [audit log delivery]. For the JSON schema, see [Configure audit logging] - [Configure audit logging]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html [audit log delivery]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [billable usage log delivery]: https://docs.databricks.com/administration-guide/account-settings/billable-usage-delivery.html + * Log Delivery Type .. py:attribute:: AUDIT_LOGS :value: "AUDIT_LOGS" @@ -185,9 +187,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: OutputFormat - The file type of log delivery. - * If `log_type` is `BILLABLE_USAGE`, this value must be `CSV`. Only the CSV (comma-separated values) format is supported. For the schema, see the [View billable usage] * If `log_type` is `AUDIT_LOGS`, this value must be `JSON`. Only the JSON (JavaScript Object Notation) format is supported. For the schema, see the [Configuring audit logs]. - [Configuring audit logs]: https://docs.databricks.com/administration-guide/account-settings/audit-logs.html [View billable usage]: https://docs.databricks.com/administration-guide/account-settings/usage.html + * Log Delivery Output Format .. py:attribute:: CSV :value: "CSV" diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index efbb6d06c..5fd115b65 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -246,7 +246,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: ConnectionType - The type of connection. + Next Id: 31 .. py:attribute:: BIGQUERY :value: "BIGQUERY" @@ -254,6 +254,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DATABRICKS :value: "DATABRICKS" + .. py:attribute:: GA4_RAW_DATA + :value: "GA4_RAW_DATA" + .. py:attribute:: GLUE :value: "GLUE" @@ -272,9 +275,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: POSTGRESQL :value: "POSTGRESQL" + .. py:attribute:: POWER_BI + :value: "POWER_BI" + .. py:attribute:: REDSHIFT :value: "REDSHIFT" + .. py:attribute:: SALESFORCE + :value: "SALESFORCE" + + .. py:attribute:: SALESFORCE_DATA_CLOUD + :value: "SALESFORCE_DATA_CLOUD" + + .. py:attribute:: SERVICENOW + :value: "SERVICENOW" + .. py:attribute:: SNOWFLAKE :value: "SNOWFLAKE" @@ -287,6 +302,12 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: TERADATA :value: "TERADATA" + .. py:attribute:: UNKNOWN_CONNECTION_TYPE + :value: "UNKNOWN_CONNECTION_TYPE" + + .. py:attribute:: WORKDAY_RAAS + :value: "WORKDAY_RAAS" + .. autoclass:: ContinuousUpdateStatus :members: :undoc-members: @@ -402,11 +423,41 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CredentialType - The type of credential. + Next Id: 12 .. py:attribute:: BEARER_TOKEN :value: "BEARER_TOKEN" + .. py:attribute:: OAUTH_ACCESS_TOKEN + :value: "OAUTH_ACCESS_TOKEN" + + .. py:attribute:: OAUTH_M2M + :value: "OAUTH_M2M" + + .. py:attribute:: OAUTH_REFRESH_TOKEN + :value: "OAUTH_REFRESH_TOKEN" + + .. py:attribute:: OAUTH_RESOURCE_OWNER_PASSWORD + :value: "OAUTH_RESOURCE_OWNER_PASSWORD" + + .. 
py:attribute:: OAUTH_U2M + :value: "OAUTH_U2M" + + .. py:attribute:: OAUTH_U2M_MAPPING + :value: "OAUTH_U2M_MAPPING" + + .. py:attribute:: OIDC_TOKEN + :value: "OIDC_TOKEN" + + .. py:attribute:: PEM_PRIVATE_KEY + :value: "PEM_PRIVATE_KEY" + + .. py:attribute:: SERVICE_CREDENTIAL + :value: "SERVICE_CREDENTIAL" + + .. py:attribute:: UNKNOWN_CREDENTIAL_TYPE + :value: "UNKNOWN_CREDENTIAL_TYPE" + .. py:attribute:: USERNAME_PASSWORD :value: "USERNAME_PASSWORD" @@ -487,34 +538,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: WORKDAY_RAAS_FORMAT :value: "WORKDAY_RAAS_FORMAT" -.. autoclass:: DatabaseCatalog - :members: - :undoc-members: - -.. autoclass:: DatabaseInstance - :members: - :undoc-members: - -.. py:class:: DatabaseInstanceState - - .. py:attribute:: AVAILABLE - :value: "AVAILABLE" - - .. py:attribute:: DELETING - :value: "DELETING" - - .. py:attribute:: FAILING_OVER - :value: "FAILING_OVER" - - .. py:attribute:: STARTING - :value: "STARTING" - - .. py:attribute:: STOPPED - :value: "STOPPED" - - .. py:attribute:: UPDATING - :value: "UPDATING" - .. autoclass:: DatabricksGcpServiceAccount :members: :undoc-members: @@ -535,25 +558,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: DeleteDatabaseCatalogResponse +.. autoclass:: DeleteResponse :members: :undoc-members: -.. autoclass:: DeleteDatabaseInstanceResponse +.. autoclass:: DeltaRuntimePropertiesKvPairs :members: :undoc-members: -.. autoclass:: DeleteResponse - :members: - :undoc-members: +.. py:class:: DeltaSharingScopeEnum -.. autoclass:: DeleteSyncedDatabaseTableResponse - :members: - :undoc-members: + .. py:attribute:: INTERNAL + :value: "INTERNAL" -.. autoclass:: DeltaRuntimePropertiesKvPairs - :members: - :undoc-members: + .. py:attribute:: INTERNAL_AND_EXTERNAL + :value: "INTERNAL_AND_EXTERNAL" .. autoclass:: Dependency :members: @@ -738,15 +757,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: GetMetastoreSummaryResponseDeltaSharingScope - - The scope of Delta Sharing enabled for the metastore. - - .. py:attribute:: INTERNAL - :value: "INTERNAL" - - .. py:attribute:: INTERNAL_AND_EXTERNAL - :value: "INTERNAL_AND_EXTERNAL" +.. autoclass:: GetPermissionsResponse + :members: + :undoc-members: .. autoclass:: GetQuotaResponse :members: @@ -784,10 +797,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListDatabaseInstancesResponse - :members: - :undoc-members: - .. autoclass:: ListExternalLocationsResponse :members: :undoc-members: @@ -851,16 +860,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: MetastoreInfoDeltaSharingScope - - The scope of Delta Sharing enabled for the metastore. - - .. py:attribute:: INTERNAL - :value: "INTERNAL" - - .. py:attribute:: INTERNAL_AND_EXTERNAL - :value: "INTERNAL_AND_EXTERNAL" - .. autoclass:: ModelVersionInfo :members: :undoc-members: @@ -1007,10 +1006,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: NewPipelineSpec - :members: - :undoc-members: - .. autoclass:: OnlineTable :members: :undoc-members: @@ -1072,10 +1067,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. 
autoclass:: PermissionsList - :members: - :undoc-members: - .. autoclass:: PipelineProgress :members: :undoc-members: @@ -1375,25 +1366,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: SyncedDatabaseTable - :members: - :undoc-members: - -.. py:class:: SyncedTableSchedulingPolicy - - .. py:attribute:: CONTINUOUS - :value: "CONTINUOUS" - - .. py:attribute:: SNAPSHOT - :value: "SNAPSHOT" - - .. py:attribute:: TRIGGERED - :value: "TRIGGERED" - -.. autoclass:: SyncedTableSpec - :members: - :undoc-members: - .. autoclass:: SystemSchemaInfo :members: :undoc-members: @@ -1508,16 +1480,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: UpdateMetastoreDeltaSharingScope - - The scope of Delta Sharing enabled for the metastore. - - .. py:attribute:: INTERNAL - :value: "INTERNAL" - - .. py:attribute:: INTERNAL_AND_EXTERNAL - :value: "INTERNAL_AND_EXTERNAL" - .. autoclass:: UpdateModelVersionRequest :members: :undoc-members: @@ -1530,6 +1492,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdatePermissionsResponse + :members: + :undoc-members: + .. autoclass:: UpdateRegisteredModelRequest :members: :undoc-members: @@ -1546,6 +1512,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateTableRequest + :members: + :undoc-members: + .. autoclass:: UpdateVolumeRequestContent :members: :undoc-members: diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index fcee1a56c..54b17b745 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -516,6 +516,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: AUTOSCALING_STATS_REPORT :value: "AUTOSCALING_STATS_REPORT" + .. py:attribute:: CLUSTER_MIGRATED + :value: "CLUSTER_MIGRATED" + .. py:attribute:: CREATING :value: "CREATING" @@ -1328,6 +1331,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DRIVER_UNEXPECTED_FAILURE :value: "DRIVER_UNEXPECTED_FAILURE" + .. py:attribute:: DRIVER_UNHEALTHY + :value: "DRIVER_UNHEALTHY" + .. py:attribute:: DRIVER_UNREACHABLE :value: "DRIVER_UNREACHABLE" diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 5ac3f0f4b..c2ddc82f5 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -12,14 +12,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CancelQueryExecutionResponse - :members: - :undoc-members: - -.. autoclass:: CancelQueryExecutionResponseStatus - :members: - :undoc-members: - .. autoclass:: CronSchedule :members: :undoc-members: @@ -41,18 +33,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: Empty - :members: - :undoc-members: - -.. autoclass:: ExecutePublishedDashboardQueryRequest - :members: - :undoc-members: - -.. autoclass:: ExecuteQueryResponse - :members: - :undoc-members: - .. autoclass:: GenieAttachment :members: :undoc-members: @@ -77,6 +57,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GenieListSpacesResponse + :members: + :undoc-members: + .. 
autoclass:: GenieMessage :members: :undoc-members: @@ -101,10 +85,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: GetPublishedDashboardEmbeddedResponse - :members: - :undoc-members: - .. autoclass:: GetPublishedDashboardTokenInfoResponse :members: :undoc-members: @@ -332,18 +312,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: PendingStatus - :members: - :undoc-members: - -.. autoclass:: PollQueryStatusResponse - :members: - :undoc-members: - -.. autoclass:: PollQueryStatusResponseData - :members: - :undoc-members: - .. autoclass:: PublishRequest :members: :undoc-members: @@ -352,10 +320,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: QueryResponseStatus - :members: - :undoc-members: - .. autoclass:: Result :members: :undoc-members: @@ -388,10 +352,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: SuccessStatus - :members: - :undoc-members: - .. autoclass:: TextAttachment :members: :undoc-members: diff --git a/docs/dbdataclasses/database.rst b/docs/dbdataclasses/database.rst new file mode 100644 index 000000000..86340b5ef --- /dev/null +++ b/docs/dbdataclasses/database.rst @@ -0,0 +1,169 @@ +Database Instances +================== + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.database`` module. + +.. py:currentmodule:: databricks.sdk.service.database +.. autoclass:: DatabaseCatalog + :members: + :undoc-members: + +.. autoclass:: DatabaseCredential + :members: + :undoc-members: + +.. autoclass:: DatabaseInstance + :members: + :undoc-members: + +.. py:class:: DatabaseInstanceState + + .. py:attribute:: AVAILABLE + :value: "AVAILABLE" + + .. py:attribute:: DELETING + :value: "DELETING" + + .. py:attribute:: FAILING_OVER + :value: "FAILING_OVER" + + .. py:attribute:: STARTING + :value: "STARTING" + + .. py:attribute:: STOPPED + :value: "STOPPED" + + .. py:attribute:: UPDATING + :value: "UPDATING" + +.. autoclass:: DatabaseTable + :members: + :undoc-members: + +.. autoclass:: DeleteDatabaseCatalogResponse + :members: + :undoc-members: + +.. autoclass:: DeleteDatabaseInstanceResponse + :members: + :undoc-members: + +.. autoclass:: DeleteDatabaseTableResponse + :members: + :undoc-members: + +.. autoclass:: DeleteSyncedDatabaseTableResponse + :members: + :undoc-members: + +.. autoclass:: GenerateDatabaseCredentialRequest + :members: + :undoc-members: + +.. autoclass:: ListDatabaseInstancesResponse + :members: + :undoc-members: + +.. autoclass:: NewPipelineSpec + :members: + :undoc-members: + +.. py:class:: ProvisioningInfoState + + .. py:attribute:: ACTIVE + :value: "ACTIVE" + + .. py:attribute:: DEGRADED + :value: "DEGRADED" + + .. py:attribute:: DELETING + :value: "DELETING" + + .. py:attribute:: FAILED + :value: "FAILED" + + .. py:attribute:: PROVISIONING + :value: "PROVISIONING" + + .. py:attribute:: UPDATING + :value: "UPDATING" + +.. autoclass:: SyncedDatabaseTable + :members: + :undoc-members: + +.. autoclass:: SyncedTableContinuousUpdateStatus + :members: + :undoc-members: + +.. autoclass:: SyncedTableFailedStatus + :members: + :undoc-members: + +.. autoclass:: SyncedTablePipelineProgress + :members: + :undoc-members: + +.. autoclass:: SyncedTableProvisioningStatus + :members: + :undoc-members: + +.. 
py:class:: SyncedTableSchedulingPolicy + + .. py:attribute:: CONTINUOUS + :value: "CONTINUOUS" + + .. py:attribute:: SNAPSHOT + :value: "SNAPSHOT" + + .. py:attribute:: TRIGGERED + :value: "TRIGGERED" + +.. autoclass:: SyncedTableSpec + :members: + :undoc-members: + +.. py:class:: SyncedTableState + + The state of a synced table. + + .. py:attribute:: SYNCED_TABLED_OFFLINE + :value: "SYNCED_TABLED_OFFLINE" + + .. py:attribute:: SYNCED_TABLE_OFFLINE_FAILED + :value: "SYNCED_TABLE_OFFLINE_FAILED" + + .. py:attribute:: SYNCED_TABLE_ONLINE + :value: "SYNCED_TABLE_ONLINE" + + .. py:attribute:: SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE + :value: "SYNCED_TABLE_ONLINE_CONTINUOUS_UPDATE" + + .. py:attribute:: SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE + :value: "SYNCED_TABLE_ONLINE_NO_PENDING_UPDATE" + + .. py:attribute:: SYNCED_TABLE_ONLINE_PIPELINE_FAILED + :value: "SYNCED_TABLE_ONLINE_PIPELINE_FAILED" + + .. py:attribute:: SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE + :value: "SYNCED_TABLE_ONLINE_TRIGGERED_UPDATE" + + .. py:attribute:: SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES + :value: "SYNCED_TABLE_ONLINE_UPDATING_PIPELINE_RESOURCES" + + .. py:attribute:: SYNCED_TABLE_PROVISIONING + :value: "SYNCED_TABLE_PROVISIONING" + + .. py:attribute:: SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT + :value: "SYNCED_TABLE_PROVISIONING_INITIAL_SNAPSHOT" + + .. py:attribute:: SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES + :value: "SYNCED_TABLE_PROVISIONING_PIPELINE_RESOURCES" + +.. autoclass:: SyncedTableStatus + :members: + :undoc-members: + +.. autoclass:: SyncedTableTriggeredUpdateStatus + :members: + :undoc-members: diff --git a/docs/dbdataclasses/index.rst b/docs/dbdataclasses/index.rst index 3ecb9c13f..ca9fd5a4d 100644 --- a/docs/dbdataclasses/index.rst +++ b/docs/dbdataclasses/index.rst @@ -5,12 +5,14 @@ Dataclasses .. toctree:: :maxdepth: 1 + aibuilder apps billing catalog cleanrooms compute dashboards + database files iam jobs @@ -19,6 +21,7 @@ Dataclasses oauth2 pipelines provisioning + qualitymonitorv2 serving settings sharing diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 670e83685..4046dabe3 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -200,6 +200,40 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DbtCloudJobRunStep + :members: + :undoc-members: + +.. py:class:: DbtCloudRunStatus + + Response enumeration from calling the dbt Cloud API, for inclusion in output + + .. py:attribute:: CANCELLED + :value: "CANCELLED" + + .. py:attribute:: ERROR + :value: "ERROR" + + .. py:attribute:: QUEUED + :value: "QUEUED" + + .. py:attribute:: RUNNING + :value: "RUNNING" + + .. py:attribute:: STARTING + :value: "STARTING" + + .. py:attribute:: SUCCESS + :value: "SUCCESS" + +.. autoclass:: DbtCloudTask + :members: + :undoc-members: + +.. autoclass:: DbtCloudTaskOutput + :members: + :undoc-members: + .. autoclass:: DbtOutput :members: :undoc-members: @@ -244,6 +278,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: FileArrivalTriggerState + :members: + :undoc-members: + .. autoclass:: ForEachStats :members: :undoc-members: @@ -1071,6 +1109,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: TriggerStateProto + :members: + :undoc-members: + .. py:class:: TriggerType The type of trigger that fired this run. 
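The jobs dataclasses added above introduce dbt Cloud run types and per-trigger state. A minimal sketch of how the new ``DbtCloudRunStatus`` enum might be consumed is shown below; the ``status`` attribute on ``DbtCloudJobRunStep`` is an assumption of this sketch, not something the diff confirms.

.. code-block:: python

    from databricks.sdk.service import jobs

    def dbt_step_finished(step: jobs.DbtCloudJobRunStep) -> bool:
        # Assumes DbtCloudJobRunStep exposes a `status` of type DbtCloudRunStatus.
        # Only SUCCESS, ERROR and CANCELLED are treated as terminal; QUEUED,
        # STARTING and RUNNING mean the dbt Cloud step is still in flight.
        terminal = {
            jobs.DbtCloudRunStatus.SUCCESS,
            jobs.DbtCloudRunStatus.ERROR,
            jobs.DbtCloudRunStatus.CANCELLED,
        }
        return step.status in terminal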
diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index 1778c5837..0891291c4 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -61,30 +61,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ArtifactCredentialInfo - :members: - :undoc-members: - -.. autoclass:: ArtifactCredentialInfoHttpHeader - :members: - :undoc-members: - -.. py:class:: ArtifactCredentialType - - The type of a given artifact access credential - - .. py:attribute:: AWS_PRESIGNED_URL - :value: "AWS_PRESIGNED_URL" - - .. py:attribute:: AZURE_ADLS_GEN2_SAS_URI - :value: "AZURE_ADLS_GEN2_SAS_URI" - - .. py:attribute:: AZURE_SAS_URI - :value: "AZURE_SAS_URI" - - .. py:attribute:: GCP_SIGNED_URL - :value: "GCP_SIGNED_URL" - .. py:class:: CommentActivityAction An action that a user (with sufficient permissions) could take on a comment. Valid values are: * `EDIT_COMMENT`: Edit the comment @@ -340,14 +316,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCEEDED :value: "SUCCEEDED" -.. autoclass:: GetCredentialsForTraceDataDownloadResponse - :members: - :undoc-members: - -.. autoclass:: GetCredentialsForTraceDataUploadResponse - :members: - :undoc-members: - .. autoclass:: GetExperimentByNameResponse :members: :undoc-members: @@ -424,10 +392,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListLoggedModelArtifactsResponse - :members: - :undoc-members: - .. autoclass:: ListModelsResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index fbad8a4f3..f4618951e 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -165,6 +165,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SQLSERVER :value: "SQLSERVER" + .. py:attribute:: TERADATA + :value: "TERADATA" + .. py:attribute:: WORKDAY_RAAS :value: "WORKDAY_RAAS" diff --git a/docs/dbdataclasses/qualitymonitorv2.rst b/docs/dbdataclasses/qualitymonitorv2.rst new file mode 100644 index 000000000..fbe2746ce --- /dev/null +++ b/docs/dbdataclasses/qualitymonitorv2.rst @@ -0,0 +1,49 @@ +Quality Monitor +=============== + +These dataclasses are used in the SDK to represent API requests and responses for services in the ``databricks.sdk.service.qualitymonitorv2`` module. + +.. py:currentmodule:: databricks.sdk.service.qualitymonitorv2 +.. autoclass:: AnomalyDetectionConfig + :members: + :undoc-members: + +.. py:class:: AnomalyDetectionRunStatus + + Status of Anomaly Detection Job Run + + .. py:attribute:: ANOMALY_DETECTION_RUN_STATUS_CANCELED + :value: "ANOMALY_DETECTION_RUN_STATUS_CANCELED" + + .. py:attribute:: ANOMALY_DETECTION_RUN_STATUS_FAILED + :value: "ANOMALY_DETECTION_RUN_STATUS_FAILED" + + .. py:attribute:: ANOMALY_DETECTION_RUN_STATUS_JOB_DELETED + :value: "ANOMALY_DETECTION_RUN_STATUS_JOB_DELETED" + + .. py:attribute:: ANOMALY_DETECTION_RUN_STATUS_PENDING + :value: "ANOMALY_DETECTION_RUN_STATUS_PENDING" + + .. py:attribute:: ANOMALY_DETECTION_RUN_STATUS_RUNNING + :value: "ANOMALY_DETECTION_RUN_STATUS_RUNNING" + + .. py:attribute:: ANOMALY_DETECTION_RUN_STATUS_SUCCESS + :value: "ANOMALY_DETECTION_RUN_STATUS_SUCCESS" + + .. py:attribute:: ANOMALY_DETECTION_RUN_STATUS_UNKNOWN + :value: "ANOMALY_DETECTION_RUN_STATUS_UNKNOWN" + + .. 
py:attribute:: ANOMALY_DETECTION_RUN_STATUS_WORKSPACE_MISMATCH_ERROR + :value: "ANOMALY_DETECTION_RUN_STATUS_WORKSPACE_MISMATCH_ERROR" + +.. autoclass:: DeleteQualityMonitorResponse + :members: + :undoc-members: + +.. autoclass:: ListQualityMonitorResponse + :members: + :undoc-members: + +.. autoclass:: QualityMonitor + :members: + :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index 0917028d7..0f97314d2 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -212,6 +212,31 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRule + :members: + :undoc-members: + +.. py:class:: CustomerFacingNetworkConnectivityConfigAwsPrivateEndpointRulePrivateLinkConnectionState + + .. py:attribute:: DISCONNECTED + :value: "DISCONNECTED" + + .. py:attribute:: ESTABLISHED + :value: "ESTABLISHED" + + .. py:attribute:: EXPIRED + :value: "EXPIRED" + + .. py:attribute:: PENDING + :value: "PENDING" + + .. py:attribute:: REJECTED + :value: "REJECTED" + +.. autoclass:: DashboardEmailSubscriptions + :members: + :undoc-members: + .. autoclass:: DefaultNamespaceSetting :members: :undoc-members: @@ -228,6 +253,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteDashboardEmailSubscriptionsResponse + :members: + :undoc-members: + .. autoclass:: DeleteDefaultNamespaceSettingResponse :members: :undoc-members: @@ -268,6 +297,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteSqlResultsDownloadResponse + :members: + :undoc-members: + .. py:class:: DestinationType .. py:attribute:: EMAIL @@ -521,10 +554,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListNccAzurePrivateEndpointRulesResponse - :members: - :undoc-members: - .. autoclass:: ListNetworkConnectivityConfigurationsResponse :members: :undoc-members: @@ -541,6 +570,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListPrivateEndpointRulesResponse + :members: + :undoc-members: + .. autoclass:: ListPublicTokensResponse :members: :undoc-members: @@ -620,6 +653,27 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: NccPrivateEndpointRule + :members: + :undoc-members: + +.. py:class:: NccPrivateEndpointRulePrivateLinkConnectionState + + .. py:attribute:: DISCONNECTED + :value: "DISCONNECTED" + + .. py:attribute:: ESTABLISHED + :value: "ESTABLISHED" + + .. py:attribute:: EXPIRED + :value: "EXPIRED" + + .. py:attribute:: PENDING + :value: "PENDING" + + .. py:attribute:: REJECTED + :value: "REJECTED" + .. autoclass:: NetworkConnectivityConfiguration :members: :undoc-members: @@ -702,6 +756,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SqlResultsDownload + :members: + :undoc-members: + .. autoclass:: StringMessage :members: :undoc-members: @@ -784,6 +842,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateDashboardEmailSubscriptionsRequest + :members: + :undoc-members: + .. 
autoclass:: UpdateDefaultNamespaceSettingRequest :members: :undoc-members: @@ -856,6 +918,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateSqlResultsDownloadRequest + :members: + :undoc-members: + .. autoclass:: WorkspaceNetworkOption :members: :undoc-members: diff --git a/docs/dbdataclasses/sharing.rst b/docs/dbdataclasses/sharing.rst index 2c79baa22..cd1cc8b92 100644 --- a/docs/dbdataclasses/sharing.rst +++ b/docs/dbdataclasses/sharing.rst @@ -14,6 +14,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: OAUTH_CLIENT_CREDENTIALS :value: "OAUTH_CLIENT_CREDENTIALS" + .. py:attribute:: OIDC_FEDERATION + :value: "OIDC_FEDERATION" + .. py:attribute:: TOKEN :value: "TOKEN" diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index 60712bc2e..2c2578d90 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -302,6 +302,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CreateQueryVisualizationsLegacyRequest + :members: + :undoc-members: + .. autoclass:: CreateVisualizationRequest :members: :undoc-members: @@ -1029,6 +1033,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: WORKSPACE_TEMPORARILY_UNAVAILABLE :value: "WORKSPACE_TEMPORARILY_UNAVAILABLE" +.. autoclass:: SetRequest + :members: + :undoc-members: + .. autoclass:: SetResponse :members: :undoc-members: @@ -1156,6 +1164,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCESS :value: "SUCCESS" +.. autoclass:: TaskTimeOverRange + :members: + :undoc-members: + +.. autoclass:: TaskTimeOverRangeEntry + :members: + :undoc-members: + .. autoclass:: TerminationReason :members: :undoc-members: @@ -1429,6 +1445,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: TransferOwnershipRequest + :members: + :undoc-members: + .. autoclass:: UpdateAlertRequest :members: :undoc-members: diff --git a/docs/gen-client-docs.py b/docs/gen-client-docs.py index 6ebfa7bab..66235d26c 100644 --- a/docs/gen-client-docs.py +++ b/docs/gen-client-docs.py @@ -249,6 +249,21 @@ class Generator: Package("marketplace", "Marketplace", "Manage AI and analytics assets such as ML models, notebooks, applications in an open marketplace"), Package("apps", "Apps", "Build custom applications on Databricks"), Package("cleanrooms", "Clean Rooms", "Manage clean rooms and their assets and task runs"), + Package( + "qualitymonitorv2", + "Quality Monitor", + "Manage quality monitor on Unity Catalog objects." + ), + Package( + "database", + "Database Instances", + "Create Database Instances and manage their configurations, including integrations with Unity Catalog" + ), + Package( + "aibuilder", + "AI Builder", + "Create and manage AI Builder resources." 
+ ) ] def __init__(self): @@ -277,11 +292,11 @@ def _load_mapping(self) -> dict[str, Tag]: key = f"{key}.{clean_parent_service}" key = f"{key}.{tag['x-databricks-service']}".lower() - + package = tag['x-databricks-package'] t = Tag(name=tag['name'], service=tag['x-databricks-service'], is_account=tag.get('x-databricks-is-accounts', False), - package=pkgs[tag['x-databricks-package']]) + package=pkgs[package]) mapping[key] = t return mapping diff --git a/docs/workspace/aibuilder/custom_llms.rst b/docs/workspace/aibuilder/custom_llms.rst new file mode 100644 index 000000000..4f7035869 --- /dev/null +++ b/docs/workspace/aibuilder/custom_llms.rst @@ -0,0 +1,61 @@ +``w.custom_llms``: Custom LLMs Service +====================================== +.. currentmodule:: databricks.sdk.service.aibuilder + +.. py:class:: CustomLlmsAPI + + The Custom LLMs service manages state and powers the UI for the Custom LLM product. + + .. py:method:: cancel(id: str) + + Cancel a Custom LLM Optimization Run. + + :param id: str + + + + + .. py:method:: create(id: str) -> CustomLlm + + Start a Custom LLM Optimization Run. + + :param id: str + The Id of the tile. + + :returns: :class:`CustomLlm` + + + .. py:method:: get(id: str) -> CustomLlm + + Get a Custom LLM. + + :param id: str + The id of the custom llm + + :returns: :class:`CustomLlm` + + + .. py:method:: update(id: str, custom_llm: CustomLlm, update_mask: str) -> CustomLlm + + Update a Custom LLM. + + :param id: str + The id of the custom llm + :param custom_llm: :class:`CustomLlm` + The CustomLlm containing the fields which should be updated. + :param update_mask: str + The list of the CustomLlm fields to update. These should correspond to the values (or lack thereof) + present in `custom_llm`. + + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`CustomLlm` + \ No newline at end of file diff --git a/docs/workspace/aibuilder/index.rst b/docs/workspace/aibuilder/index.rst new file mode 100644 index 000000000..ff3ba49e7 --- /dev/null +++ b/docs/workspace/aibuilder/index.rst @@ -0,0 +1,10 @@ + +AI Builder +========== + +Create and manage AI Builder resources. + +.. toctree:: + :maxdepth: 1 + + custom_llms \ No newline at end of file diff --git a/docs/workspace/catalog/catalogs.rst b/docs/workspace/catalog/catalogs.rst index 60959cad4..2505551cd 100644 --- a/docs/workspace/catalog/catalogs.rst +++ b/docs/workspace/catalog/catalogs.rst @@ -24,10 +24,10 @@ w = WorkspaceClient() - created = w.catalogs.create(name=f"sdk-{time.time_ns()}") + created_catalog = w.catalogs.create(name=f"sdk-{time.time_ns()}") # cleanup - w.catalogs.delete(name=created.name, force=True) + w.catalogs.delete(name=created_catalog.name, force=True) Create a catalog. 
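Stepping back to the new ``w.custom_llms`` service documented above: its ``update()`` docstring explains the field-mask convention at some length, so a short, hedged sketch may help. Only the documented ``get``/``update`` signatures are used; the id value is a placeholder, and the ``"*"`` mask is used purely for brevity even though the docstring recommends listing the changed fields explicitly.

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    llm_id = "my-custom-llm-id"  # placeholder id

    # Read the current state of the custom LLM, then write it back unchanged
    # with a full-replacement mask ("*"). In practice, prefer an explicit
    # comma-separated list of the fields you actually changed.
    llm = w.custom_llms.get(id=llm_id)
    updated = w.custom_llms.update(id=llm_id, custom_llm=llm, update_mask="*")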
diff --git a/docs/workspace/catalog/connections.rst b/docs/workspace/catalog/connections.rst index 463c9493e..50785cd23 100644 --- a/docs/workspace/catalog/connections.rst +++ b/docs/workspace/catalog/connections.rst @@ -57,7 +57,7 @@ :param comment: str (optional) User-provided free-form text description. :param properties: Dict[str,str] (optional) - An object containing map of key-value properties attached to the connection. + A map of key-value properties attached to the securable. :param read_only: bool (optional) If the connection is read only. diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index c9f1e3e37..91d9af27f 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -30,22 +30,20 @@ w = WorkspaceClient() - storage_credential = w.storage_credentials.create( + credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), - comment="created via SDK", ) - external_location = w.external_locations.create( + created = w.external_locations.create( name=f"sdk-{time.time_ns()}", - credential_name=storage_credential.name, - comment="created via SDK", - url="s3://" + os.environ["TEST_BUCKET"] + "/" + f"sdk-{time.time_ns()}", + credential_name=credential.name, + url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), ) # cleanup - w.storage_credentials.delete(name=storage_credential.name) - w.external_locations.delete(name=external_location.name) + w.storage_credentials.delete(name=credential.name) + w.external_locations.delete(name=created.name) Create an external location. @@ -190,24 +188,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', + url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', + url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), ) # cleanup - w.storage_credentials.delete(delete=credential.name) - w.external_locations.delete(delete=created.name) + w.storage_credentials.delete(name=credential.name) + w.external_locations.delete(name=created.name) Update an external location. diff --git a/docs/workspace/catalog/grants.rst b/docs/workspace/catalog/grants.rst index 73b3dae28..603a20584 100644 --- a/docs/workspace/catalog/grants.rst +++ b/docs/workspace/catalog/grants.rst @@ -14,7 +14,7 @@ within the catalog. Similarly, privileges granted on a schema are inherited by all current and future objects within that schema. - .. py:method:: get(securable_type: SecurableType, full_name: str [, principal: Optional[str]]) -> PermissionsList + .. py:method:: get(securable_type: str, full_name: str [, max_results: Optional[int], page_token: Optional[str], principal: Optional[str]]) -> GetPermissionsResponse Usage: @@ -62,19 +62,31 @@ Get permissions. - Gets the permissions for a securable. + Gets the permissions for a securable. Does not include inherited permissions. 
- :param securable_type: :class:`SecurableType` + :param securable_type: str Type of securable. :param full_name: str Full name of securable. + :param max_results: int (optional) + Specifies the maximum number of privileges to return (page length). Every PrivilegeAssignment + present in a single page response is guaranteed to contain all the privileges granted on the + requested Securable for the respective principal. + + If not set, all the permissions are returned. If set to - lesser than 0: invalid parameter error - + 0: page length is set to a server configured value - lesser than 150 but greater than 0: invalid + parameter error (this is to ensure that server is able to return at least one complete + PrivilegeAssignment in a single page response) - greater than (or equal to) 150: page length is the + minimum of this value and a server configured value + :param page_token: str (optional) + Opaque pagination token to go to next page based on previous query. :param principal: str (optional) If provided, only the permissions for the specified principal (user or group) are returned. - :returns: :class:`PermissionsList` + :returns: :class:`GetPermissionsResponse` - .. py:method:: get_effective(securable_type: SecurableType, full_name: str [, principal: Optional[str]]) -> EffectivePermissionsList + .. py:method:: get_effective(securable_type: str, full_name: str [, max_results: Optional[int], page_token: Optional[str], principal: Optional[str]]) -> EffectivePermissionsList Usage: @@ -122,12 +134,26 @@ Get effective permissions. - Gets the effective permissions for a securable. + Gets the effective permissions for a securable. Includes inherited permissions from any parent + securables. - :param securable_type: :class:`SecurableType` + :param securable_type: str Type of securable. :param full_name: str Full name of securable. + :param max_results: int (optional) + Specifies the maximum number of privileges to return (page length). Every + EffectivePrivilegeAssignment present in a single page response is guaranteed to contain all the + effective privileges granted on (or inherited by) the requested Securable for the respective + principal. + + If not set, all the effective permissions are returned. If set to - lesser than 0: invalid parameter + error - 0: page length is set to a server configured value - lesser than 150 but greater than 0: + invalid parameter error (this is to ensure that server is able to return at least one complete + EffectivePrivilegeAssignment in a single page response) - greater than (or equal to) 150: page + length is the minimum of this value and a server configured value + :param page_token: str (optional) + Opaque token for the next page of results (pagination). :param principal: str (optional) If provided, only the effective permissions for the specified principal (user or group) are returned. @@ -135,7 +161,7 @@ :returns: :class:`EffectivePermissionsList` - .. py:method:: update(securable_type: SecurableType, full_name: str [, changes: Optional[List[PermissionsChange]]]) -> PermissionsList + .. py:method:: update(securable_type: str, full_name: str [, changes: Optional[List[PermissionsChange]]]) -> UpdatePermissionsResponse Usage: @@ -193,12 +219,12 @@ Updates the permissions for a securable. - :param securable_type: :class:`SecurableType` + :param securable_type: str Type of securable. :param full_name: str Full name of securable. :param changes: List[:class:`PermissionsChange`] (optional) Array of permissions change objects. 
- :returns: :class:`PermissionsList` + :returns: :class:`UpdatePermissionsResponse` \ No newline at end of file diff --git a/docs/workspace/catalog/index.rst b/docs/workspace/catalog/index.rst index 7549bc487..471804098 100644 --- a/docs/workspace/catalog/index.rst +++ b/docs/workspace/catalog/index.rst @@ -11,7 +11,6 @@ Configure data governance with Unity Catalog for metastores, catalogs, schemas, catalogs connections credentials - database_instances external_locations functions grants diff --git a/docs/workspace/catalog/metastores.rst b/docs/workspace/catalog/metastores.rst index 52c36437e..cf35cc01b 100644 --- a/docs/workspace/catalog/metastores.rst +++ b/docs/workspace/catalog/metastores.rst @@ -53,7 +53,7 @@ :param metastore_id: str The unique ID of the metastore. :param default_catalog_name: str - The name of the default catalog in the metastore. This field is depracted. Please use "Default + The name of the default catalog in the metastore. This field is deprecated. Please use "Default Namespace API" to configure the default catalog for a Databricks workspace. @@ -91,9 +91,7 @@ :param name: str The user-specified name of the metastore. :param region: str (optional) - Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). The field can be omitted in - the __workspace-level__ __API__ but not in the __account-level__ __API__. If this field is omitted, - the region of the workspace receiving the request will be used. + Cloud region which the metastore serves (e.g., `us-west-2`, `westus`). :param storage_root: str (optional) The storage root URL for metastore @@ -169,7 +167,7 @@ :returns: :class:`MetastoreInfo` - .. py:method:: list() -> Iterator[MetastoreInfo] + .. py:method:: list( [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[MetastoreInfo] Usage: @@ -187,6 +185,17 @@ Gets an array of the available metastores (as __MetastoreInfo__ objects). The caller must be an admin to retrieve this info. There is no guarantee of a specific ordering of the elements in the array. + :param max_results: int (optional) + Maximum number of metastores to return. - when set to a value greater than 0, the page length is the + minimum of this value and a server configured value; - when set to 0, the page length is set to a + server configured value (recommended); - when set to a value less than 0, an invalid parameter error + is returned; - If not set, all the metastores are returned (not recommended). - Note: The number of + returned metastores might be less than the specified max_results size, even zero. The only + definitive indication that no further metastores can be fetched is when the next_page_token is unset + from the response. + :param page_token: str (optional) + Opaque pagination token to go to next page based on previous query. + :returns: Iterator over :class:`MetastoreInfo` @@ -249,7 +258,7 @@ - .. py:method:: update(id: str [, delta_sharing_organization_name: Optional[str], delta_sharing_recipient_token_lifetime_in_seconds: Optional[int], delta_sharing_scope: Optional[UpdateMetastoreDeltaSharingScope], new_name: Optional[str], owner: Optional[str], privilege_model_version: Optional[str], storage_root_credential_id: Optional[str]]) -> MetastoreInfo + .. 
py:method:: update(id: str [, delta_sharing_organization_name: Optional[str], delta_sharing_recipient_token_lifetime_in_seconds: Optional[int], delta_sharing_scope: Optional[DeltaSharingScopeEnum], new_name: Optional[str], owner: Optional[str], privilege_model_version: Optional[str], storage_root_credential_id: Optional[str]]) -> MetastoreInfo Usage: @@ -285,7 +294,7 @@ Sharing as the official name. :param delta_sharing_recipient_token_lifetime_in_seconds: int (optional) The lifetime of delta sharing recipient token in seconds. - :param delta_sharing_scope: :class:`UpdateMetastoreDeltaSharingScope` (optional) + :param delta_sharing_scope: :class:`DeltaSharingScopeEnum` (optional) The scope of Delta Sharing enabled for the metastore. :param new_name: str (optional) New name for the metastore. @@ -311,7 +320,7 @@ :param workspace_id: int A workspace ID. :param default_catalog_name: str (optional) - The name of the default catalog in the metastore. This field is depracted. Please use "Default + The name of the default catalog in the metastore. This field is deprecated. Please use "Default Namespace API" to configure the default catalog for a Databricks workspace. :param metastore_id: str (optional) The unique ID of the metastore. diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index d646a7489..7c4a84e53 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -179,6 +179,7 @@ :param comment: str (optional) User-provided free-form text description. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) + Whether predictive optimization should be enabled for this object and objects under it. :param new_name: str (optional) New name for the schema. :param owner: str (optional) diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index a1b985155..9a5ed0a46 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -30,13 +30,13 @@ w = WorkspaceClient() - created = w.storage_credentials.create( + credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=created.name) + w.storage_credentials.delete(name=credential.name) Create a storage credential. @@ -123,10 +123,11 @@ .. code-block:: from databricks.sdk import WorkspaceClient + from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list() + all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) List credentials. @@ -164,17 +165,17 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) _ = w.storage_credentials.update( name=created.name, comment=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(name=created.name) + w.storage_credentials.delete(delete=created.name) Update a credential. 
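For the paginated ``w.grants`` methods shown earlier in this diff, note that ``securable_type`` is now a plain string rather than the ``SecurableType`` enum and that results can be paged. A minimal sketch, assuming the response exposes ``privilege_assignments`` and ``next_page_token`` attributes (the diff documents the request parameters, not the response fields):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    page_token = None
    while True:
        resp = w.grants.get(
            securable_type="table",             # now a plain string, not SecurableType
            full_name="main.default.my_table",  # placeholder securable name
            max_results=150,                    # per the docstring, values between 0 and 150 are rejected
            page_token=page_token,
        )
        for assignment in resp.privilege_assignments or []:  # assumed field name
            print(assignment.principal, assignment.privileges)
        page_token = resp.next_page_token  # assumed field name
        if not page_token:
            break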
diff --git a/docs/workspace/catalog/tables.rst b/docs/workspace/catalog/tables.rst index 0ff5bb2d1..4bbd3faad 100644 --- a/docs/workspace/catalog/tables.rst +++ b/docs/workspace/catalog/tables.rst @@ -123,7 +123,7 @@ created_schema = w.schemas.create(name=f"sdk-{time.time_ns()}", catalog_name=created_catalog.name) - summaries = w.tables.list_summaries(catalog_name=created_catalog.name, schema_name_pattern=created_schema.name) + all_tables = w.tables.list(catalog_name=created_catalog.name, schema_name=created_schema.name) # cleanup w.schemas.delete(full_name=created_schema.full_name) diff --git a/docs/workspace/cleanrooms/clean_room_assets.rst b/docs/workspace/cleanrooms/clean_room_assets.rst index 5021629c8..1fabe51cb 100644 --- a/docs/workspace/cleanrooms/clean_room_assets.rst +++ b/docs/workspace/cleanrooms/clean_room_assets.rst @@ -24,7 +24,7 @@ :returns: :class:`CleanRoomAsset` - .. py:method:: delete(clean_room_name: str, asset_type: CleanRoomAssetAssetType, asset_full_name: str) + .. py:method:: delete(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str) Delete an asset. @@ -34,13 +34,13 @@ Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` The type of the asset. - :param asset_full_name: str + :param name: str The fully qualified name of the asset, it is same as the name field in CleanRoomAsset. - .. py:method:: get(clean_room_name: str, asset_type: CleanRoomAssetAssetType, asset_full_name: str) -> CleanRoomAsset + .. py:method:: get(clean_room_name: str, asset_type: CleanRoomAssetAssetType, name: str) -> CleanRoomAsset Get an asset. @@ -50,7 +50,7 @@ Name of the clean room. :param asset_type: :class:`CleanRoomAssetAssetType` The type of the asset. - :param asset_full_name: str + :param name: str The fully qualified name of the asset, it is same as the name field in CleanRoomAsset. :returns: :class:`CleanRoomAsset` diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index fe9271c13..961b0472a 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -727,11 +727,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import compute w = WorkspaceClient() - all = w.clusters.list(compute.ListClustersRequest()) + nodes = w.clusters.list_node_types() List clusters. diff --git a/docs/workspace/dashboards/genie.rst b/docs/workspace/dashboards/genie.rst index 60a1389f7..fde42d405 100644 --- a/docs/workspace/dashboards/genie.rst +++ b/docs/workspace/dashboards/genie.rst @@ -198,6 +198,20 @@ :returns: :class:`GenieSpace` + .. py:method:: list_spaces( [, page_size: Optional[int], page_token: Optional[str]]) -> GenieListSpacesResponse + + List Genie spaces. + + Get list of Genie Spaces. + + :param page_size: int (optional) + Maximum number of spaces to return per page + :param page_token: str (optional) + Pagination token for getting the next page of results + + :returns: :class:`GenieListSpacesResponse` + + .. py:method:: start_conversation(space_id: str, content: str) -> Wait[GenieMessage] Start conversation. 
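The new ``list_spaces()`` method above returns a single ``GenieListSpacesResponse`` page rather than an iterator, so callers drive the pagination themselves. A minimal sketch, assuming the response carries ``spaces`` and ``next_page_token`` attributes and that ``GenieSpace`` exposes ``space_id`` and ``title`` (none of which the diff spells out):

.. code-block:: python

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    token = None
    while True:
        resp = w.genie.list_spaces(page_size=50, page_token=token)
        for space in resp.spaces or []:         # assumed field name
            print(space.space_id, space.title)  # assumed GenieSpace fields
        token = resp.next_page_token            # assumed field name
        if not token:
            break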
diff --git a/docs/workspace/dashboards/index.rst b/docs/workspace/dashboards/index.rst index 940efa5dd..877891d25 100644 --- a/docs/workspace/dashboards/index.rst +++ b/docs/workspace/dashboards/index.rst @@ -9,5 +9,4 @@ Manage Lakeview dashboards genie lakeview - lakeview_embedded - query_execution \ No newline at end of file + lakeview_embedded \ No newline at end of file diff --git a/docs/workspace/dashboards/lakeview_embedded.rst b/docs/workspace/dashboards/lakeview_embedded.rst index fb22c47ad..ce7cc9248 100644 --- a/docs/workspace/dashboards/lakeview_embedded.rst +++ b/docs/workspace/dashboards/lakeview_embedded.rst @@ -6,18 +6,6 @@ Token-based Lakeview APIs for embedding dashboards in external applications. - .. py:method:: get_published_dashboard_embedded(dashboard_id: str) - - Read a published dashboard in an embedded ui. - - Get the current published dashboard within an embedded context. - - :param dashboard_id: str - UUID identifying the published dashboard. - - - - .. py:method:: get_published_dashboard_token_info(dashboard_id: str [, external_value: Optional[str], external_viewer_id: Optional[str]]) -> GetPublishedDashboardTokenInfoResponse Read an information of a published dashboard to mint an OAuth token. diff --git a/docs/workspace/database/database.rst b/docs/workspace/database/database.rst new file mode 100644 index 000000000..46a9dccab --- /dev/null +++ b/docs/workspace/database/database.rst @@ -0,0 +1,175 @@ +``w.database``: Database Instances +================================== +.. currentmodule:: databricks.sdk.service.database + +.. py:class:: DatabaseAPI + + Database Instances provide access to a database via REST API or direct SQL. + + .. py:method:: create_database_catalog(catalog: DatabaseCatalog) -> DatabaseCatalog + + Create a Database Catalog. + + :param catalog: :class:`DatabaseCatalog` + + :returns: :class:`DatabaseCatalog` + + + .. py:method:: create_database_instance(database_instance: DatabaseInstance) -> DatabaseInstance + + Create a Database Instance. + + :param database_instance: :class:`DatabaseInstance` + A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. + + :returns: :class:`DatabaseInstance` + + + .. py:method:: create_database_table(table: DatabaseTable) -> DatabaseTable + + Create a Database Table. + + :param table: :class:`DatabaseTable` + Next field marker: 13 + + :returns: :class:`DatabaseTable` + + + .. py:method:: create_synced_database_table(synced_table: SyncedDatabaseTable) -> SyncedDatabaseTable + + Create a Synced Database Table. + + :param synced_table: :class:`SyncedDatabaseTable` + Next field marker: 12 + + :returns: :class:`SyncedDatabaseTable` + + + .. py:method:: delete_database_catalog(name: str) + + Delete a Database Catalog. + + :param name: str + + + + + .. py:method:: delete_database_instance(name: str [, force: Optional[bool], purge: Optional[bool]]) + + Delete a Database Instance. + + :param name: str + Name of the instance to delete. + :param force: bool (optional) + By default, a instance cannot be deleted if it has descendant instances created via PITR. If this + flag is specified as true, all descendent instances will be deleted as well. + :param purge: bool (optional) + If false, the database instance is soft deleted. Soft deleted instances behave as if they are + deleted, and cannot be used for CRUD operations nor connected to. However they can be undeleted by + calling the undelete API for a limited time. 
If true, the database instance is hard deleted and + cannot be undeleted. + + + + + .. py:method:: delete_database_table(name: str) + + Delete a Database Table. + + :param name: str + + + + + .. py:method:: delete_synced_database_table(name: str) + + Delete a Synced Database Table. + + :param name: str + + + + + .. py:method:: find_database_instance_by_uid( [, uid: Optional[str]]) -> DatabaseInstance + + Find a Database Instance by uid. + + :param uid: str (optional) + UID of the cluster to get. + + :returns: :class:`DatabaseInstance` + + + .. py:method:: generate_database_credential( [, instance_names: Optional[List[str]], request_id: Optional[str]]) -> DatabaseCredential + + Generates a credential that can be used to access database instances. + + :param instance_names: List[str] (optional) + Instances to which the token will be scoped. + :param request_id: str (optional) + + :returns: :class:`DatabaseCredential` + + + .. py:method:: get_database_catalog(name: str) -> DatabaseCatalog + + Get a Database Catalog. + + :param name: str + + :returns: :class:`DatabaseCatalog` + + + .. py:method:: get_database_instance(name: str) -> DatabaseInstance + + Get a Database Instance. + + :param name: str + Name of the cluster to get. + + :returns: :class:`DatabaseInstance` + + + .. py:method:: get_database_table(name: str) -> DatabaseTable + + Get a Database Table. + + :param name: str + + :returns: :class:`DatabaseTable` + + + .. py:method:: get_synced_database_table(name: str) -> SyncedDatabaseTable + + Get a Synced Database Table. + + :param name: str + + :returns: :class:`SyncedDatabaseTable` + + + .. py:method:: list_database_instances( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseInstance] + + List Database Instances. + + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of Database Instances. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseInstance` + + + .. py:method:: update_database_instance(name: str, database_instance: DatabaseInstance, update_mask: str) -> DatabaseInstance + + Update a Database Instance. + + :param name: str + The name of the instance. This is the unique identifier for the instance. + :param database_instance: :class:`DatabaseInstance` + A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. + :param update_mask: str + The list of fields to update. + + :returns: :class:`DatabaseInstance` + \ No newline at end of file diff --git a/docs/workspace/database/index.rst b/docs/workspace/database/index.rst new file mode 100644 index 000000000..644c20f56 --- /dev/null +++ b/docs/workspace/database/index.rst @@ -0,0 +1,10 @@ + +Database Instances +================== + +Create Database Instances and manage their configurations, including integrations with Unity Catalog + +.. toctree:: + :maxdepth: 1 + + database \ No newline at end of file diff --git a/docs/workspace/files/files.rst b/docs/workspace/files/files.rst index ad7bca57e..f3e4ae304 100644 --- a/docs/workspace/files/files.rst +++ b/docs/workspace/files/files.rst @@ -20,6 +20,8 @@ `enable_experimental_files_api_client = True` in your configuration profile or use the environment variable `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`. + Use of Files API may incur Databricks data transfer charges. + [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html .. 
py:method:: create_directory(directory_path: str) diff --git a/docs/workspace/iam/groups.rst b/docs/workspace/iam/groups.rst index 0b62b675a..fe0187cd6 100644 --- a/docs/workspace/iam/groups.rst +++ b/docs/workspace/iam/groups.rst @@ -71,6 +71,9 @@ group = w.groups.create(display_name=f"sdk-{time.time_ns()}") w.groups.delete(id=group.id) + + # cleanup + w.groups.delete(id=group.id) Delete a group. diff --git a/docs/workspace/iam/service_principals.rst b/docs/workspace/iam/service_principals.rst index 40b65f6d5..74a498b00 100644 --- a/docs/workspace/iam/service_principals.rst +++ b/docs/workspace/iam/service_principals.rst @@ -20,13 +20,19 @@ import time from databricks.sdk import WorkspaceClient + from databricks.sdk.service import iam w = WorkspaceClient() - created = w.service_principals.create(display_name=f"sdk-{time.time_ns()}") + groups = w.groups.group_display_name_to_id_map(iam.ListGroupsRequest()) + + spn = w.service_principals.create( + display_name=f"sdk-{time.time_ns()}", + groups=[iam.ComplexValue(value=groups["admins"])], + ) # cleanup - w.service_principals.delete(id=created.id) + w.service_principals.delete(id=spn.id) Create a service principal. diff --git a/docs/workspace/iam/users.rst b/docs/workspace/iam/users.rst index 34de48f3b..76837ac54 100644 --- a/docs/workspace/iam/users.rst +++ b/docs/workspace/iam/users.rst @@ -80,12 +80,9 @@ w = WorkspaceClient() - user = w.users.create( - display_name=f"sdk-{time.time_ns()}", - user_name=f"sdk-{time.time_ns()}@example.com", - ) + other_owner = w.users.create(user_name=f"sdk-{time.time_ns()}@example.com") - w.users.delete(id=user.id) + w.users.delete(id=other_owner.id) Delete a user. diff --git a/docs/workspace/index.rst b/docs/workspace/index.rst index dc86a0e78..d7ecc203e 100644 --- a/docs/workspace/index.rst +++ b/docs/workspace/index.rst @@ -7,17 +7,20 @@ These APIs are available from WorkspaceClient .. toctree:: :maxdepth: 1 + aibuilder/index apps/index catalog/index cleanrooms/index compute/index dashboards/index + database/index files/index iam/index jobs/index marketplace/index ml/index pipelines/index + qualitymonitorv2/index serving/index settings/index sharing/index diff --git a/docs/workspace/jobs/jobs.rst b/docs/workspace/jobs/jobs.rst index 27eed0a54..89ea5e2ae 100644 --- a/docs/workspace/jobs/jobs.rst +++ b/docs/workspace/jobs/jobs.rst @@ -367,21 +367,23 @@ w.clusters.ensure_cluster_is_running(os.environ["DATABRICKS_CLUSTER_ID"]) and os.environ["DATABRICKS_CLUSTER_ID"] ) - run = w.jobs.submit( - run_name=f"sdk-{time.time_ns()}", + created_job = w.jobs.create( + name=f"sdk-{time.time_ns()}", tasks=[ - jobs.SubmitTask( + jobs.Task( + description="test", existing_cluster_id=cluster_id, notebook_task=jobs.NotebookTask(notebook_path=notebook_path), - task_key=f"sdk-{time.time_ns()}", + task_key="test", + timeout_seconds=0, ) ], - ).result() + ) - output = w.jobs.get_run_output(run_id=run.tasks[0].run_id) + by_id = w.jobs.get(job_id=created_job.job_id) # cleanup - w.jobs.delete_run(run_id=run.run_id) + w.jobs.delete(job_id=created_job.job_id) Get a single job. diff --git a/docs/workspace/ml/experiments.rst b/docs/workspace/ml/experiments.rst index f18915885..791931167 100644 --- a/docs/workspace/ml/experiments.rst +++ b/docs/workspace/ml/experiments.rst @@ -207,7 +207,7 @@ The ID of the logged model to finalize. :param status: :class:`LoggedModelStatus` Whether or not the model is ready for use. ``"LOGGED_MODEL_UPLOAD_FAILED"`` indicates that something - went wrong when logging the model weights / agent code). 
+ went wrong when logging the model weights / agent code. :returns: :class:`FinalizeLoggedModelResponse` @@ -230,26 +230,6 @@ :returns: :class:`GetExperimentByNameResponse` - .. py:method:: get_credentials_for_trace_data_download(request_id: str) -> GetCredentialsForTraceDataDownloadResponse - - Get credentials to download trace data. - - :param request_id: str - The ID of the trace to fetch artifact download credentials for. - - :returns: :class:`GetCredentialsForTraceDataDownloadResponse` - - - .. py:method:: get_credentials_for_trace_data_upload(request_id: str) -> GetCredentialsForTraceDataUploadResponse - - Get credentials to upload trace data. - - :param request_id: str - The ID of the trace to fetch artifact upload credentials for. - - :returns: :class:`GetCredentialsForTraceDataUploadResponse` - - .. py:method:: get_experiment(experiment_id: str) -> GetExperimentResponse @@ -411,26 +391,6 @@ :returns: Iterator over :class:`Experiment` - .. py:method:: list_logged_model_artifacts(model_id: str [, artifact_directory_path: Optional[str], page_token: Optional[str]]) -> ListLoggedModelArtifactsResponse - - List artifacts for a logged model. - - List artifacts for a logged model. Takes an optional ``artifact_directory_path`` prefix which if - specified, the response contains only artifacts with the specified prefix. - - :param model_id: str - The ID of the logged model for which to list the artifacts. - :param artifact_directory_path: str (optional) - Filter artifacts matching this path (a relative path from the root artifact directory). - :param page_token: str (optional) - Token indicating the page of artifact results to fetch. `page_token` is not supported when listing - artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call - `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports - pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents). - - :returns: :class:`ListLoggedModelArtifactsResponse` - - .. py:method:: log_batch( [, metrics: Optional[List[Metric]], params: Optional[List[Param]], run_id: Optional[str], tags: Optional[List[RunTag]]]) Log a batch of metrics/params/tags for a run. diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 23c357275..4c1b3d917 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -94,9 +94,7 @@ w = WorkspaceClient() - model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - - mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + created = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") Create a model. @@ -129,7 +127,7 @@ model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Create a model version. @@ -242,7 +240,8 @@ :param http_url_spec: :class:`HttpUrlSpec` (optional) :param job_spec: :class:`JobSpec` (optional) :param model_name: str (optional) - Name of the model whose events would trigger this webhook. + If model name is not specified, a registry-wide webhook is created that listens for the specified + events across all versions of all registered models. 
:param status: :class:`RegistryWebhookStatus` (optional) Enable or disable triggering the webhook, or put the webhook into test mode. The default is `ACTIVE`: * `ACTIVE`: Webhook is triggered when an associated event happens. @@ -262,6 +261,7 @@ Deletes a comment on a model version. :param id: str + Unique identifier of an activity diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index 5b52818da..92d8582db 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -15,7 +15,7 @@ also enforce data quality with Delta Live Tables expectations. Expectations allow you to define expected data quality and specify how to handle records that fail those expectations. - .. py:method:: create( [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) -> CreatePipelineResponse + .. py:method:: create( [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], tags: Optional[Dict[str, str]], target: Optional[str], trigger: Optional[PipelineTrigger]]) -> CreatePipelineResponse Usage: @@ -116,6 +116,9 @@ Whether serverless compute is enabled for this pipeline. :param storage: str (optional) DBFS root directory for storing checkpoints and tables. + :param tags: Dict[str,str] (optional) + A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and + are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline. :param target: str (optional) Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated @@ -130,7 +133,8 @@ Delete a pipeline. - Deletes a pipeline. + Deletes a pipeline. 
Deleting a pipeline is a permanent action that stops and removes the pipeline and + its tables. You cannot undo this action. :param pipeline_id: str @@ -402,7 +406,7 @@ .. py:method:: stop_and_wait(pipeline_id: str, timeout: datetime.timedelta = 0:20:00) -> GetPipelineResponse - .. py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) + .. py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], tags: Optional[Dict[str, str]], target: Optional[str], trigger: Optional[PipelineTrigger]]) Usage: @@ -522,6 +526,9 @@ Whether serverless compute is enabled for this pipeline. :param storage: str (optional) DBFS root directory for storing checkpoints and tables. + :param tags: Dict[str,str] (optional) + A map of tags associated with the pipeline. These are forwarded to the cluster as cluster tags, and + are therefore subject to the same limitations. A maximum of 25 tags can be added to the pipeline. :param target: str (optional) Target schema (database) to add tables in this pipeline to. Exactly one of `schema` or `target` must be specified. To publish to Unity Catalog, also specify `catalog`. This legacy field is deprecated diff --git a/docs/workspace/qualitymonitorv2/index.rst b/docs/workspace/qualitymonitorv2/index.rst new file mode 100644 index 000000000..ccbac6108 --- /dev/null +++ b/docs/workspace/qualitymonitorv2/index.rst @@ -0,0 +1,10 @@ + +Quality Monitor +=============== + +Manage quality monitor on Unity Catalog objects. + +.. 
toctree:: + :maxdepth: 1 + + quality_monitor_v2 \ No newline at end of file diff --git a/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst b/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst new file mode 100644 index 000000000..50f647795 --- /dev/null +++ b/docs/workspace/qualitymonitorv2/quality_monitor_v2.rst @@ -0,0 +1,73 @@ +``w.quality_monitor_v2``: QualityMonitor.v2 +=========================================== +.. currentmodule:: databricks.sdk.service.qualitymonitorv2 + +.. py:class:: QualityMonitorV2API + + Manage data quality of UC objects (currently support `schema`) + + .. py:method:: create_quality_monitor(quality_monitor: QualityMonitor) -> QualityMonitor + + Create a quality monitor. + + Create a quality monitor on UC object + + :param quality_monitor: :class:`QualityMonitor` + + :returns: :class:`QualityMonitor` + + + .. py:method:: delete_quality_monitor(object_type: str, object_id: str) + + Delete a quality monitor. + + Delete a quality monitor on UC object + + :param object_type: str + The type of the monitored object. Can be one of the following: schema. + :param object_id: str + The uuid of the request object. For example, schema id. + + + + + .. py:method:: get_quality_monitor(object_type: str, object_id: str) -> QualityMonitor + + Read a quality monitor. + + Read a quality monitor on UC object + + :param object_type: str + The type of the monitored object. Can be one of the following: schema. + :param object_id: str + The uuid of the request object. For example, schema id. + + :returns: :class:`QualityMonitor` + + + .. py:method:: list_quality_monitor( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[QualityMonitor] + + List quality monitors. + + (Unimplemented) List quality monitors + + :param page_size: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`QualityMonitor` + + + .. py:method:: update_quality_monitor(object_type: str, object_id: str, quality_monitor: QualityMonitor) -> QualityMonitor + + Update a quality monitor. + + (Unimplemented) Update a quality monitor on UC object + + :param object_type: str + The type of the monitored object. Can be one of the following: schema. + :param object_id: str + The uuid of the request object. For example, schema id. + :param quality_monitor: :class:`QualityMonitor` + + :returns: :class:`QualityMonitor` + \ No newline at end of file diff --git a/docs/workspace/settings/dashboard_email_subscriptions.rst b/docs/workspace/settings/dashboard_email_subscriptions.rst new file mode 100644 index 000000000..22da502f6 --- /dev/null +++ b/docs/workspace/settings/dashboard_email_subscriptions.rst @@ -0,0 +1,64 @@ +``w.settings.dashboard_email_subscriptions``: Dashboard Email Subscriptions +=========================================================================== +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: DashboardEmailSubscriptionsAPI + + Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can send + subscription emails containing PDFs and/or images of the dashboard. By default, this setting is enabled + (set to `true`) + + .. py:method:: delete( [, etag: Optional[str]]) -> DeleteDashboardEmailSubscriptionsResponse + + Delete the Dashboard Email Subscriptions setting. + + Reverts the Dashboard Email Subscriptions setting to its default value. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. 
This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteDashboardEmailSubscriptionsResponse` + + + .. py:method:: get( [, etag: Optional[str]]) -> DashboardEmailSubscriptions + + Get the Dashboard Email Subscriptions setting. + + Gets the Dashboard Email Subscriptions setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DashboardEmailSubscriptions` + + + .. py:method:: update(allow_missing: bool, setting: DashboardEmailSubscriptions, field_mask: str) -> DashboardEmailSubscriptions + + Update the Dashboard Email Subscriptions setting. + + Updates the Dashboard Email Subscriptions setting. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`DashboardEmailSubscriptions` + :param field_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`DashboardEmailSubscriptions` + \ No newline at end of file diff --git a/docs/workspace/settings/index.rst b/docs/workspace/settings/index.rst index c1b45519c..6c0858b7a 100644 --- a/docs/workspace/settings/index.rst +++ b/docs/workspace/settings/index.rst @@ -15,6 +15,7 @@ Manage security settings for Accounts and Workspaces aibi_dashboard_embedding_approved_domains automatic_cluster_update compliance_security_profile + dashboard_email_subscriptions default_namespace disable_legacy_access disable_legacy_dbfs @@ -24,6 +25,7 @@ Manage security settings for Accounts and Workspaces enhanced_security_monitoring llm_proxy_partner_powered_workspace restrict_workspace_admins + sql_results_download token_management tokens workspace_conf \ No newline at end of file diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst index 6eddb2508..783f55b6f 100644 --- a/docs/workspace/settings/settings.rst +++ b/docs/workspace/settings/settings.rst @@ -32,6 +32,13 @@ This settings can NOT be disabled once it is enabled. + .. 
py:property:: dashboard_email_subscriptions + :type: DashboardEmailSubscriptionsAPI + + Controls whether schedules or workload tasks for refreshing AI/BI Dashboards in the workspace can send + subscription emails containing PDFs and/or images of the dashboard. By default, this setting is enabled + (set to `true`) + .. py:property:: default_namespace :type: DefaultNamespaceAPI @@ -110,4 +117,10 @@ RESTRICT_TOKENS_AND_JOB_RUN_AS, workspace admins can only create personal access tokens on behalf of service principals they have the Service Principal User role on. They can also only change a job owner to themselves. And they can change the job run_as setting to themselves or to a service principal on which - they have the Service Principal User role. \ No newline at end of file + they have the Service Principal User role. + + .. py:property:: sql_results_download + :type: SqlResultsDownloadAPI + + Controls whether users within the workspace are allowed to download results from the SQL Editor and AI/BI + Dashboards UIs. By default, this setting is enabled (set to `true`) \ No newline at end of file diff --git a/docs/workspace/settings/sql_results_download.rst b/docs/workspace/settings/sql_results_download.rst new file mode 100644 index 000000000..8cf1cc13a --- /dev/null +++ b/docs/workspace/settings/sql_results_download.rst @@ -0,0 +1,63 @@ +``w.settings.sql_results_download``: SQL Results Download +========================================================= +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: SqlResultsDownloadAPI + + Controls whether users within the workspace are allowed to download results from the SQL Editor and AI/BI + Dashboards UIs. By default, this setting is enabled (set to `true`) + + .. py:method:: delete( [, etag: Optional[str]]) -> DeleteSqlResultsDownloadResponse + + Delete the SQL Results Download setting. + + Reverts the SQL Results Download setting to its default value. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteSqlResultsDownloadResponse` + + + .. py:method:: get( [, etag: Optional[str]]) -> SqlResultsDownload + + Get the SQL Results Download setting. + + Gets the SQL Results Download setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`SqlResultsDownload` + + + .. py:method:: update(allow_missing: bool, setting: SqlResultsDownload, field_mask: str) -> SqlResultsDownload + + Update the SQL Results Download setting. + + Updates the SQL Results Download setting. 
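Before the ``update`` parameters below, a minimal sketch of the read -> delete pattern that the etag descriptions above recommend, using only the ``get`` and ``delete`` methods documented for this setting:

.. code-block:: python

    # Minimal sketch of etag-based optimistic concurrency for the new
    # SQL Results Download setting; only the documented get/delete calls are used.
    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Read the current setting to obtain a fresh etag...
    current = w.settings.sql_results_download.get()

    # ...then pass that etag when reverting to the default, so a concurrent
    # write is detected instead of silently overwritten.
    response = w.settings.sql_results_download.delete(etag=current.etag)
    print(response.etag)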
+ + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`SqlResultsDownload` + :param field_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`SqlResultsDownload` + \ No newline at end of file diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index 263545400..d78dd62a0 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -108,12 +108,25 @@ .. code-block:: + import time + from databricks.sdk import WorkspaceClient - from databricks.sdk.service import sharing w = WorkspaceClient() - all = w.providers.list(sharing.ListProvidersRequest()) + public_share_recipient = """{ + "shareCredentialsVersion":1, + "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz", + "endpoint":"https://sharing.delta.io/delta-sharing/" + } + """ + + created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient) + + shares = w.providers.list_shares(name=created.name) + + # cleanup + w.providers.delete(name=created.name) List providers. diff --git a/docs/workspace/sharing/shares.rst b/docs/workspace/sharing/shares.rst index 8d1dfea02..05dea6902 100644 --- a/docs/workspace/sharing/shares.rst +++ b/docs/workspace/sharing/shares.rst @@ -242,7 +242,7 @@ :param name: str The name of the share. :param changes: List[:class:`PermissionsChange`] (optional) - Array of permission changes. + Array of permissions change objects. :param omit_permissions_list: bool (optional) Optional. Whether to return the latest permissions list of the share in the response. diff --git a/docs/workspace/sql/alerts_v2.rst b/docs/workspace/sql/alerts_v2.rst index 2ff773949..0c61c7d7c 100644 --- a/docs/workspace/sql/alerts_v2.rst +++ b/docs/workspace/sql/alerts_v2.rst @@ -4,7 +4,7 @@ .. py:class:: AlertsV2API - TODO: Add description + New version of SQL Alerts .. py:method:: create_alert(alert: AlertV2) -> AlertV2 diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index a33023065..abfc30860 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -188,7 +188,7 @@ content=base64.b64encode(("CREATE LIVE TABLE dlt_sample AS SELECT 1").encode()).decode(), format=workspace.ImportFormat.SOURCE, language=workspace.Language.SQL, - overwrite=True, + overwrite=true_, path=notebook_path, ) @@ -235,16 +235,14 @@ .. code-block:: - import os - import time - from databricks.sdk import WorkspaceClient w = WorkspaceClient() - notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" - - objects = w.workspace.list(path=os.path.dirname(notebook)) + names = [] + for i in w.workspace.list(f"/Users/{w.current_user.me().user_name}", recursive=True): + names.append(i.path) + assert len(names) > 0 List workspace objects