From 5bacb97c4e96291117b47134c020f76cf80fa45d Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Mon, 26 May 2025 16:20:21 +0000 Subject: [PATCH 1/2] Update APIs --- .codegen.json | 6 +- databricks/sdk/__init__.py | 535 +++++++++--------- docs/account/iam/access_control.rst | 15 + docs/account/iam/workspace_assignment.rst | 8 +- docs/account/provisioning/storage.rst | 6 +- docs/account/settings/index.rst | 6 +- .../llm_proxy_partner_powered_account.rst | 46 ++ .../llm_proxy_partner_powered_enforce.rst | 47 ++ docs/account/settings/network_policies.rst | 73 +++ docs/account/settings/settings.rst | 11 + .../workspace_network_configuration.rst | 39 ++ docs/dbdataclasses/apps.rst | 17 + docs/dbdataclasses/catalog.rst | 174 ++++-- docs/dbdataclasses/cleanrooms.rst | 23 + docs/dbdataclasses/compute.rst | 25 +- docs/dbdataclasses/dashboards.rst | 21 + docs/dbdataclasses/iam.rst | 18 +- docs/dbdataclasses/jobs.rst | 5 +- docs/dbdataclasses/ml.rst | 109 ++++ docs/dbdataclasses/pipelines.rst | 52 ++ docs/dbdataclasses/serving.rst | 16 + docs/dbdataclasses/settings.rst | 108 ++++ docs/dbdataclasses/sharing.rst | 12 + docs/dbdataclasses/sql.rst | 12 - docs/workspace/catalog/database_instances.rst | 136 +++++ docs/workspace/catalog/external_locations.rst | 39 +- docs/workspace/catalog/index.rst | 1 + docs/workspace/catalog/schemas.rst | 1 - .../workspace/catalog/storage_credentials.rst | 15 +- docs/workspace/catalog/system_schemas.rst | 4 +- docs/workspace/catalog/workspace_bindings.rst | 30 +- docs/workspace/compute/clusters.rst | 6 +- docs/workspace/compute/instance_pools.rst | 5 +- docs/workspace/compute/instance_profiles.rst | 6 +- docs/workspace/files/files.rst | 3 +- .../iam/account_access_control_proxy.rst | 23 +- docs/workspace/iam/current_user.rst | 2 +- docs/workspace/iam/permissions.rst | 68 +-- docs/workspace/iam/service_principals.rst | 10 +- docs/workspace/iam/users.rst | 8 +- docs/workspace/ml/experiments.rst | 158 ++++++ docs/workspace/ml/forecasting.rst | 9 +- docs/workspace/ml/model_registry.rst | 4 +- docs/workspace/pipelines/pipelines.rst | 14 +- docs/workspace/serving/serving_endpoints.rst | 44 ++ .../settings/disable_legacy_dbfs.rst | 10 +- docs/workspace/settings/index.rst | 1 + .../llm_proxy_partner_powered_workspace.rst | 62 ++ docs/workspace/settings/settings.rst | 15 +- docs/workspace/sharing/index.rst | 1 + docs/workspace/sharing/providers.rst | 17 +- .../sharing/recipient_federation_policies.rst | 125 ++++ docs/workspace/sharing/shares.rst | 4 +- docs/workspace/sql/alerts.rst | 5 +- docs/workspace/sql/alerts_v2.rst | 12 +- docs/workspace/sql/queries.rst | 5 +- docs/workspace/workspace/workspace.rst | 25 +- 57 files changed, 1742 insertions(+), 510 deletions(-) create mode 100644 docs/account/settings/llm_proxy_partner_powered_account.rst create mode 100644 docs/account/settings/llm_proxy_partner_powered_enforce.rst create mode 100644 docs/account/settings/network_policies.rst create mode 100644 docs/account/settings/workspace_network_configuration.rst create mode 100644 docs/workspace/catalog/database_instances.rst create mode 100644 docs/workspace/settings/llm_proxy_partner_powered_workspace.rst create mode 100644 docs/workspace/sharing/recipient_federation_policies.rst diff --git a/.codegen.json b/.codegen.json index f98f968ab..65077c1cc 100644 --- a/.codegen.json +++ b/.codegen.json @@ -5,9 +5,9 @@ "databricks/sdk/version.py": "__version__ = \"$VERSION\"" }, "toolchain": { - "required": ["python3"], + "required": ["python3.12"], "pre_setup": [ - "python3 -m venv .databricks" + 
"python3.12 -m venv .databricks" ], "prepend_path": ".databricks/bin", "setup": [ @@ -17,7 +17,7 @@ "make fmt", "pytest -m 'not integration' --cov=databricks --cov-report html tests", "pip install .", - "python docs/gen-client-docs.py" + "python3.12 docs/gen-client-docs.py" ] } } diff --git a/databricks/sdk/__init__.py b/databricks/sdk/__init__.py index 83f8eeab2..f75645d25 100755 --- a/databricks/sdk/__init__.py +++ b/databricks/sdk/__init__.py @@ -5,7 +5,6 @@ import databricks.sdk.core as client import databricks.sdk.dbutils as dbutils -import databricks.sdk.service as service from databricks.sdk import azure from databricks.sdk.credentials_provider import CredentialsStrategy from databricks.sdk.data_plane import DataPlaneTokenSource @@ -14,6 +13,26 @@ from databricks.sdk.mixins.jobs import JobsExt from databricks.sdk.mixins.open_ai_client import ServingEndpointsExt from databricks.sdk.mixins.workspace import WorkspaceExt +from databricks.sdk.service import apps as pkg_apps +from databricks.sdk.service import billing as pkg_billing +from databricks.sdk.service import catalog as pkg_catalog +from databricks.sdk.service import cleanrooms as pkg_cleanrooms +from databricks.sdk.service import compute as pkg_compute +from databricks.sdk.service import dashboards as pkg_dashboards +from databricks.sdk.service import files as pkg_files +from databricks.sdk.service import iam as pkg_iam +from databricks.sdk.service import jobs as pkg_jobs +from databricks.sdk.service import marketplace as pkg_marketplace +from databricks.sdk.service import ml as pkg_ml +from databricks.sdk.service import oauth2 as pkg_oauth2 +from databricks.sdk.service import pipelines as pkg_pipelines +from databricks.sdk.service import provisioning as pkg_provisioning +from databricks.sdk.service import serving as pkg_serving +from databricks.sdk.service import settings as pkg_settings +from databricks.sdk.service import sharing as pkg_sharing +from databricks.sdk.service import sql as pkg_sql +from databricks.sdk.service import vectorsearch as pkg_vectorsearch +from databricks.sdk.service import workspace as pkg_workspace from databricks.sdk.service.apps import AppsAPI from databricks.sdk.service.billing import (BillableUsageAPI, BudgetPolicyAPI, BudgetsAPI, LogDeliveryAPI, @@ -209,119 +228,115 @@ def __init__( self._dbutils = _make_dbutils(self._config) self._api_client = client.ApiClient(self._config) serving_endpoints = ServingEndpointsExt(self._api_client) - self._access_control = service.iam.AccessControlAPI(self._api_client) - self._account_access_control_proxy = service.iam.AccountAccessControlProxyAPI(self._api_client) - self._alerts = service.sql.AlertsAPI(self._api_client) - self._alerts_legacy = service.sql.AlertsLegacyAPI(self._api_client) - self._alerts_v2 = service.sql.AlertsV2API(self._api_client) - self._apps = service.apps.AppsAPI(self._api_client) - self._artifact_allowlists = service.catalog.ArtifactAllowlistsAPI(self._api_client) - self._catalogs = service.catalog.CatalogsAPI(self._api_client) - self._clean_room_assets = service.cleanrooms.CleanRoomAssetsAPI(self._api_client) - self._clean_room_task_runs = service.cleanrooms.CleanRoomTaskRunsAPI(self._api_client) - self._clean_rooms = service.cleanrooms.CleanRoomsAPI(self._api_client) - self._cluster_policies = service.compute.ClusterPoliciesAPI(self._api_client) + self._access_control = pkg_iam.AccessControlAPI(self._api_client) + self._account_access_control_proxy = pkg_iam.AccountAccessControlProxyAPI(self._api_client) + self._alerts = 
pkg_sql.AlertsAPI(self._api_client) + self._alerts_legacy = pkg_sql.AlertsLegacyAPI(self._api_client) + self._alerts_v2 = pkg_sql.AlertsV2API(self._api_client) + self._apps = pkg_apps.AppsAPI(self._api_client) + self._artifact_allowlists = pkg_catalog.ArtifactAllowlistsAPI(self._api_client) + self._catalogs = pkg_catalog.CatalogsAPI(self._api_client) + self._clean_room_assets = pkg_cleanrooms.CleanRoomAssetsAPI(self._api_client) + self._clean_room_task_runs = pkg_cleanrooms.CleanRoomTaskRunsAPI(self._api_client) + self._clean_rooms = pkg_cleanrooms.CleanRoomsAPI(self._api_client) + self._cluster_policies = pkg_compute.ClusterPoliciesAPI(self._api_client) self._clusters = ClustersExt(self._api_client) - self._command_execution = service.compute.CommandExecutionAPI(self._api_client) - self._connections = service.catalog.ConnectionsAPI(self._api_client) - self._consumer_fulfillments = service.marketplace.ConsumerFulfillmentsAPI(self._api_client) - self._consumer_installations = service.marketplace.ConsumerInstallationsAPI(self._api_client) - self._consumer_listings = service.marketplace.ConsumerListingsAPI(self._api_client) - self._consumer_personalization_requests = service.marketplace.ConsumerPersonalizationRequestsAPI( - self._api_client - ) - self._consumer_providers = service.marketplace.ConsumerProvidersAPI(self._api_client) - self._credentials = service.catalog.CredentialsAPI(self._api_client) - self._credentials_manager = service.settings.CredentialsManagerAPI(self._api_client) - self._current_user = service.iam.CurrentUserAPI(self._api_client) - self._dashboard_widgets = service.sql.DashboardWidgetsAPI(self._api_client) - self._dashboards = service.sql.DashboardsAPI(self._api_client) - self._data_sources = service.sql.DataSourcesAPI(self._api_client) - self._database_instances = service.catalog.DatabaseInstancesAPI(self._api_client) + self._command_execution = pkg_compute.CommandExecutionAPI(self._api_client) + self._connections = pkg_catalog.ConnectionsAPI(self._api_client) + self._consumer_fulfillments = pkg_marketplace.ConsumerFulfillmentsAPI(self._api_client) + self._consumer_installations = pkg_marketplace.ConsumerInstallationsAPI(self._api_client) + self._consumer_listings = pkg_marketplace.ConsumerListingsAPI(self._api_client) + self._consumer_personalization_requests = pkg_marketplace.ConsumerPersonalizationRequestsAPI(self._api_client) + self._consumer_providers = pkg_marketplace.ConsumerProvidersAPI(self._api_client) + self._credentials = pkg_catalog.CredentialsAPI(self._api_client) + self._credentials_manager = pkg_settings.CredentialsManagerAPI(self._api_client) + self._current_user = pkg_iam.CurrentUserAPI(self._api_client) + self._dashboard_widgets = pkg_sql.DashboardWidgetsAPI(self._api_client) + self._dashboards = pkg_sql.DashboardsAPI(self._api_client) + self._data_sources = pkg_sql.DataSourcesAPI(self._api_client) + self._database_instances = pkg_catalog.DatabaseInstancesAPI(self._api_client) self._dbfs = DbfsExt(self._api_client) - self._dbsql_permissions = service.sql.DbsqlPermissionsAPI(self._api_client) - self._experiments = service.ml.ExperimentsAPI(self._api_client) - self._external_locations = service.catalog.ExternalLocationsAPI(self._api_client) + self._dbsql_permissions = pkg_sql.DbsqlPermissionsAPI(self._api_client) + self._experiments = pkg_ml.ExperimentsAPI(self._api_client) + self._external_locations = pkg_catalog.ExternalLocationsAPI(self._api_client) self._files = _make_files_client(self._api_client, self._config) - self._functions = 
service.catalog.FunctionsAPI(self._api_client) - self._genie = service.dashboards.GenieAPI(self._api_client) - self._git_credentials = service.workspace.GitCredentialsAPI(self._api_client) - self._global_init_scripts = service.compute.GlobalInitScriptsAPI(self._api_client) - self._grants = service.catalog.GrantsAPI(self._api_client) - self._groups = service.iam.GroupsAPI(self._api_client) - self._instance_pools = service.compute.InstancePoolsAPI(self._api_client) - self._instance_profiles = service.compute.InstanceProfilesAPI(self._api_client) - self._ip_access_lists = service.settings.IpAccessListsAPI(self._api_client) + self._functions = pkg_catalog.FunctionsAPI(self._api_client) + self._genie = pkg_dashboards.GenieAPI(self._api_client) + self._git_credentials = pkg_workspace.GitCredentialsAPI(self._api_client) + self._global_init_scripts = pkg_compute.GlobalInitScriptsAPI(self._api_client) + self._grants = pkg_catalog.GrantsAPI(self._api_client) + self._groups = pkg_iam.GroupsAPI(self._api_client) + self._instance_pools = pkg_compute.InstancePoolsAPI(self._api_client) + self._instance_profiles = pkg_compute.InstanceProfilesAPI(self._api_client) + self._ip_access_lists = pkg_settings.IpAccessListsAPI(self._api_client) self._jobs = JobsExt(self._api_client) - self._lakeview = service.dashboards.LakeviewAPI(self._api_client) - self._lakeview_embedded = service.dashboards.LakeviewEmbeddedAPI(self._api_client) - self._libraries = service.compute.LibrariesAPI(self._api_client) - self._metastores = service.catalog.MetastoresAPI(self._api_client) - self._model_registry = service.ml.ModelRegistryAPI(self._api_client) - self._model_versions = service.catalog.ModelVersionsAPI(self._api_client) - self._notification_destinations = service.settings.NotificationDestinationsAPI(self._api_client) - self._online_tables = service.catalog.OnlineTablesAPI(self._api_client) - self._permission_migration = service.iam.PermissionMigrationAPI(self._api_client) - self._permissions = service.iam.PermissionsAPI(self._api_client) - self._pipelines = service.pipelines.PipelinesAPI(self._api_client) - self._policy_compliance_for_clusters = service.compute.PolicyComplianceForClustersAPI(self._api_client) - self._policy_compliance_for_jobs = service.jobs.PolicyComplianceForJobsAPI(self._api_client) - self._policy_families = service.compute.PolicyFamiliesAPI(self._api_client) - self._provider_exchange_filters = service.marketplace.ProviderExchangeFiltersAPI(self._api_client) - self._provider_exchanges = service.marketplace.ProviderExchangesAPI(self._api_client) - self._provider_files = service.marketplace.ProviderFilesAPI(self._api_client) - self._provider_listings = service.marketplace.ProviderListingsAPI(self._api_client) - self._provider_personalization_requests = service.marketplace.ProviderPersonalizationRequestsAPI( - self._api_client - ) - self._provider_provider_analytics_dashboards = service.marketplace.ProviderProviderAnalyticsDashboardsAPI( + self._lakeview = pkg_dashboards.LakeviewAPI(self._api_client) + self._lakeview_embedded = pkg_dashboards.LakeviewEmbeddedAPI(self._api_client) + self._libraries = pkg_compute.LibrariesAPI(self._api_client) + self._metastores = pkg_catalog.MetastoresAPI(self._api_client) + self._model_registry = pkg_ml.ModelRegistryAPI(self._api_client) + self._model_versions = pkg_catalog.ModelVersionsAPI(self._api_client) + self._notification_destinations = pkg_settings.NotificationDestinationsAPI(self._api_client) + self._online_tables = pkg_catalog.OnlineTablesAPI(self._api_client) + 
self._permission_migration = pkg_iam.PermissionMigrationAPI(self._api_client) + self._permissions = pkg_iam.PermissionsAPI(self._api_client) + self._pipelines = pkg_pipelines.PipelinesAPI(self._api_client) + self._policy_compliance_for_clusters = pkg_compute.PolicyComplianceForClustersAPI(self._api_client) + self._policy_compliance_for_jobs = pkg_jobs.PolicyComplianceForJobsAPI(self._api_client) + self._policy_families = pkg_compute.PolicyFamiliesAPI(self._api_client) + self._provider_exchange_filters = pkg_marketplace.ProviderExchangeFiltersAPI(self._api_client) + self._provider_exchanges = pkg_marketplace.ProviderExchangesAPI(self._api_client) + self._provider_files = pkg_marketplace.ProviderFilesAPI(self._api_client) + self._provider_listings = pkg_marketplace.ProviderListingsAPI(self._api_client) + self._provider_personalization_requests = pkg_marketplace.ProviderPersonalizationRequestsAPI(self._api_client) + self._provider_provider_analytics_dashboards = pkg_marketplace.ProviderProviderAnalyticsDashboardsAPI( self._api_client ) - self._provider_providers = service.marketplace.ProviderProvidersAPI(self._api_client) - self._providers = service.sharing.ProvidersAPI(self._api_client) - self._quality_monitors = service.catalog.QualityMonitorsAPI(self._api_client) - self._queries = service.sql.QueriesAPI(self._api_client) - self._queries_legacy = service.sql.QueriesLegacyAPI(self._api_client) - self._query_execution = service.dashboards.QueryExecutionAPI(self._api_client) - self._query_history = service.sql.QueryHistoryAPI(self._api_client) - self._query_visualizations = service.sql.QueryVisualizationsAPI(self._api_client) - self._query_visualizations_legacy = service.sql.QueryVisualizationsLegacyAPI(self._api_client) - self._recipient_activation = service.sharing.RecipientActivationAPI(self._api_client) - self._recipient_federation_policies = service.sharing.RecipientFederationPoliciesAPI(self._api_client) - self._recipients = service.sharing.RecipientsAPI(self._api_client) - self._redash_config = service.sql.RedashConfigAPI(self._api_client) - self._registered_models = service.catalog.RegisteredModelsAPI(self._api_client) - self._repos = service.workspace.ReposAPI(self._api_client) - self._resource_quotas = service.catalog.ResourceQuotasAPI(self._api_client) - self._schemas = service.catalog.SchemasAPI(self._api_client) - self._secrets = service.workspace.SecretsAPI(self._api_client) - self._service_principals = service.iam.ServicePrincipalsAPI(self._api_client) + self._provider_providers = pkg_marketplace.ProviderProvidersAPI(self._api_client) + self._providers = pkg_sharing.ProvidersAPI(self._api_client) + self._quality_monitors = pkg_catalog.QualityMonitorsAPI(self._api_client) + self._queries = pkg_sql.QueriesAPI(self._api_client) + self._queries_legacy = pkg_sql.QueriesLegacyAPI(self._api_client) + self._query_execution = pkg_dashboards.QueryExecutionAPI(self._api_client) + self._query_history = pkg_sql.QueryHistoryAPI(self._api_client) + self._query_visualizations = pkg_sql.QueryVisualizationsAPI(self._api_client) + self._query_visualizations_legacy = pkg_sql.QueryVisualizationsLegacyAPI(self._api_client) + self._recipient_activation = pkg_sharing.RecipientActivationAPI(self._api_client) + self._recipient_federation_policies = pkg_sharing.RecipientFederationPoliciesAPI(self._api_client) + self._recipients = pkg_sharing.RecipientsAPI(self._api_client) + self._redash_config = pkg_sql.RedashConfigAPI(self._api_client) + self._registered_models = 
pkg_catalog.RegisteredModelsAPI(self._api_client) + self._repos = pkg_workspace.ReposAPI(self._api_client) + self._resource_quotas = pkg_catalog.ResourceQuotasAPI(self._api_client) + self._schemas = pkg_catalog.SchemasAPI(self._api_client) + self._secrets = pkg_workspace.SecretsAPI(self._api_client) + self._service_principals = pkg_iam.ServicePrincipalsAPI(self._api_client) self._serving_endpoints = serving_endpoints serving_endpoints_data_plane_token_source = DataPlaneTokenSource( self._config.host, self._config.oauth_token, self._config.disable_async_token_refresh ) - self._serving_endpoints_data_plane = service.serving.ServingEndpointsDataPlaneAPI( + self._serving_endpoints_data_plane = pkg_serving.ServingEndpointsDataPlaneAPI( self._api_client, serving_endpoints, serving_endpoints_data_plane_token_source ) - self._settings = service.settings.SettingsAPI(self._api_client) - self._shares = service.sharing.SharesAPI(self._api_client) - self._statement_execution = service.sql.StatementExecutionAPI(self._api_client) - self._storage_credentials = service.catalog.StorageCredentialsAPI(self._api_client) - self._system_schemas = service.catalog.SystemSchemasAPI(self._api_client) - self._table_constraints = service.catalog.TableConstraintsAPI(self._api_client) - self._tables = service.catalog.TablesAPI(self._api_client) - self._temporary_table_credentials = service.catalog.TemporaryTableCredentialsAPI(self._api_client) - self._token_management = service.settings.TokenManagementAPI(self._api_client) - self._tokens = service.settings.TokensAPI(self._api_client) - self._users = service.iam.UsersAPI(self._api_client) - self._vector_search_endpoints = service.vectorsearch.VectorSearchEndpointsAPI(self._api_client) - self._vector_search_indexes = service.vectorsearch.VectorSearchIndexesAPI(self._api_client) - self._volumes = service.catalog.VolumesAPI(self._api_client) - self._warehouses = service.sql.WarehousesAPI(self._api_client) + self._settings = pkg_settings.SettingsAPI(self._api_client) + self._shares = pkg_sharing.SharesAPI(self._api_client) + self._statement_execution = pkg_sql.StatementExecutionAPI(self._api_client) + self._storage_credentials = pkg_catalog.StorageCredentialsAPI(self._api_client) + self._system_schemas = pkg_catalog.SystemSchemasAPI(self._api_client) + self._table_constraints = pkg_catalog.TableConstraintsAPI(self._api_client) + self._tables = pkg_catalog.TablesAPI(self._api_client) + self._temporary_table_credentials = pkg_catalog.TemporaryTableCredentialsAPI(self._api_client) + self._token_management = pkg_settings.TokenManagementAPI(self._api_client) + self._tokens = pkg_settings.TokensAPI(self._api_client) + self._users = pkg_iam.UsersAPI(self._api_client) + self._vector_search_endpoints = pkg_vectorsearch.VectorSearchEndpointsAPI(self._api_client) + self._vector_search_indexes = pkg_vectorsearch.VectorSearchIndexesAPI(self._api_client) + self._volumes = pkg_catalog.VolumesAPI(self._api_client) + self._warehouses = pkg_sql.WarehousesAPI(self._api_client) self._workspace = WorkspaceExt(self._api_client) - self._workspace_bindings = service.catalog.WorkspaceBindingsAPI(self._api_client) - self._workspace_conf = service.settings.WorkspaceConfAPI(self._api_client) - self._forecasting = service.ml.ForecastingAPI(self._api_client) + self._workspace_bindings = pkg_catalog.WorkspaceBindingsAPI(self._api_client) + self._workspace_conf = pkg_settings.WorkspaceConfAPI(self._api_client) + self._forecasting = pkg_ml.ForecastingAPI(self._api_client) @property def config(self) -> 
client.Config: @@ -336,62 +351,62 @@ def dbutils(self) -> dbutils.RemoteDbUtils: return self._dbutils @property - def access_control(self) -> service.iam.AccessControlAPI: + def access_control(self) -> pkg_iam.AccessControlAPI: """Rule based Access Control for Databricks Resources.""" return self._access_control @property - def account_access_control_proxy(self) -> service.iam.AccountAccessControlProxyAPI: + def account_access_control_proxy(self) -> pkg_iam.AccountAccessControlProxyAPI: """These APIs manage access rules on resources in an account.""" return self._account_access_control_proxy @property - def alerts(self) -> service.sql.AlertsAPI: + def alerts(self) -> pkg_sql.AlertsAPI: """The alerts API can be used to perform CRUD operations on alerts.""" return self._alerts @property - def alerts_legacy(self) -> service.sql.AlertsLegacyAPI: + def alerts_legacy(self) -> pkg_sql.AlertsLegacyAPI: """The alerts API can be used to perform CRUD operations on alerts.""" return self._alerts_legacy @property - def alerts_v2(self) -> service.sql.AlertsV2API: + def alerts_v2(self) -> pkg_sql.AlertsV2API: """TODO: Add description.""" return self._alerts_v2 @property - def apps(self) -> service.apps.AppsAPI: + def apps(self) -> pkg_apps.AppsAPI: """Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on.""" return self._apps @property - def artifact_allowlists(self) -> service.catalog.ArtifactAllowlistsAPI: + def artifact_allowlists(self) -> pkg_catalog.ArtifactAllowlistsAPI: """In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the `allowlist` in UC so that users can leverage these artifacts on compute configured with shared access mode.""" return self._artifact_allowlists @property - def catalogs(self) -> service.catalog.CatalogsAPI: + def catalogs(self) -> pkg_catalog.CatalogsAPI: """A catalog is the first layer of Unity Catalog’s three-level namespace.""" return self._catalogs @property - def clean_room_assets(self) -> service.cleanrooms.CleanRoomAssetsAPI: + def clean_room_assets(self) -> pkg_cleanrooms.CleanRoomAssetsAPI: """Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room.""" return self._clean_room_assets @property - def clean_room_task_runs(self) -> service.cleanrooms.CleanRoomTaskRunsAPI: + def clean_room_task_runs(self) -> pkg_cleanrooms.CleanRoomTaskRunsAPI: """Clean room task runs are the executions of notebooks in a clean room.""" return self._clean_room_task_runs @property - def clean_rooms(self) -> service.cleanrooms.CleanRoomsAPI: + def clean_rooms(self) -> pkg_cleanrooms.CleanRoomsAPI: """A clean room uses Delta Sharing and serverless compute to provide a secure and privacy-protecting environment where multiple parties can work together on sensitive enterprise data without direct access to each other’s data.""" return self._clean_rooms @property - def cluster_policies(self) -> service.compute.ClusterPoliciesAPI: + def cluster_policies(self) -> pkg_compute.ClusterPoliciesAPI: """You can use cluster policies to control users' ability to configure clusters based on a set of rules.""" return self._cluster_policies @@ -401,72 +416,72 @@ def clusters(self) -> ClustersExt: return self._clusters @property - def command_execution(self) -> service.compute.CommandExecutionAPI: + def command_execution(self) -> pkg_compute.CommandExecutionAPI: """This API allows execution of Python, Scala, 
SQL, or R commands on running Databricks Clusters.""" return self._command_execution @property - def connections(self) -> service.catalog.ConnectionsAPI: + def connections(self) -> pkg_catalog.ConnectionsAPI: """Connections allow for creating a connection to an external data source.""" return self._connections @property - def consumer_fulfillments(self) -> service.marketplace.ConsumerFulfillmentsAPI: + def consumer_fulfillments(self) -> pkg_marketplace.ConsumerFulfillmentsAPI: """Fulfillments are entities that allow consumers to preview installations.""" return self._consumer_fulfillments @property - def consumer_installations(self) -> service.marketplace.ConsumerInstallationsAPI: + def consumer_installations(self) -> pkg_marketplace.ConsumerInstallationsAPI: """Installations are entities that allow consumers to interact with Databricks Marketplace listings.""" return self._consumer_installations @property - def consumer_listings(self) -> service.marketplace.ConsumerListingsAPI: + def consumer_listings(self) -> pkg_marketplace.ConsumerListingsAPI: """Listings are the core entities in the Marketplace.""" return self._consumer_listings @property - def consumer_personalization_requests(self) -> service.marketplace.ConsumerPersonalizationRequestsAPI: + def consumer_personalization_requests(self) -> pkg_marketplace.ConsumerPersonalizationRequestsAPI: """Personalization Requests allow customers to interact with the individualized Marketplace listing flow.""" return self._consumer_personalization_requests @property - def consumer_providers(self) -> service.marketplace.ConsumerProvidersAPI: + def consumer_providers(self) -> pkg_marketplace.ConsumerProvidersAPI: """Providers are the entities that publish listings to the Marketplace.""" return self._consumer_providers @property - def credentials(self) -> service.catalog.CredentialsAPI: + def credentials(self) -> pkg_catalog.CredentialsAPI: """A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant.""" return self._credentials @property - def credentials_manager(self) -> service.settings.CredentialsManagerAPI: + def credentials_manager(self) -> pkg_settings.CredentialsManagerAPI: """Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens.""" return self._credentials_manager @property - def current_user(self) -> service.iam.CurrentUserAPI: + def current_user(self) -> pkg_iam.CurrentUserAPI: """This API allows retrieving information about currently authenticated user or service principal.""" return self._current_user @property - def dashboard_widgets(self) -> service.sql.DashboardWidgetsAPI: + def dashboard_widgets(self) -> pkg_sql.DashboardWidgetsAPI: """This is an evolving API that facilitates the addition and removal of widgets from existing dashboards within the Databricks Workspace.""" return self._dashboard_widgets @property - def dashboards(self) -> service.sql.DashboardsAPI: + def dashboards(self) -> pkg_sql.DashboardsAPI: """In general, there is little need to modify dashboards using the API.""" return self._dashboards @property - def data_sources(self) -> service.sql.DataSourcesAPI: + def data_sources(self) -> pkg_sql.DataSourcesAPI: """This API is provided to assist you in making new query objects.""" return self._data_sources @property - def database_instances(self) -> service.catalog.DatabaseInstancesAPI: + def database_instances(self) -> pkg_catalog.DatabaseInstancesAPI: """Database Instances provide 
access to a database via REST API or direct SQL.""" return self._database_instances @@ -476,67 +491,67 @@ def dbfs(self) -> DbfsExt: return self._dbfs @property - def dbsql_permissions(self) -> service.sql.DbsqlPermissionsAPI: + def dbsql_permissions(self) -> pkg_sql.DbsqlPermissionsAPI: """The SQL Permissions API is similar to the endpoints of the :method:permissions/set.""" return self._dbsql_permissions @property - def experiments(self) -> service.ml.ExperimentsAPI: + def experiments(self) -> pkg_ml.ExperimentsAPI: """Experiments are the primary unit of organization in MLflow; all MLflow runs belong to an experiment.""" return self._experiments @property - def external_locations(self) -> service.catalog.ExternalLocationsAPI: + def external_locations(self) -> pkg_catalog.ExternalLocationsAPI: """An external location is an object that combines a cloud storage path with a storage credential that authorizes access to the cloud storage path.""" return self._external_locations @property - def files(self) -> service.files.FilesAPI: + def files(self) -> pkg_files.FilesAPI: """The Files API is a standard HTTP API that allows you to read, write, list, and delete files and directories by referring to their URI.""" return self._files @property - def functions(self) -> service.catalog.FunctionsAPI: + def functions(self) -> pkg_catalog.FunctionsAPI: """Functions implement User-Defined Functions (UDFs) in Unity Catalog.""" return self._functions @property - def genie(self) -> service.dashboards.GenieAPI: + def genie(self) -> pkg_dashboards.GenieAPI: """Genie provides a no-code experience for business users, powered by AI/BI.""" return self._genie @property - def git_credentials(self) -> service.workspace.GitCredentialsAPI: + def git_credentials(self) -> pkg_workspace.GitCredentialsAPI: """Registers personal access token for Databricks to do operations on behalf of the user.""" return self._git_credentials @property - def global_init_scripts(self) -> service.compute.GlobalInitScriptsAPI: + def global_init_scripts(self) -> pkg_compute.GlobalInitScriptsAPI: """The Global Init Scripts API enables Workspace administrators to configure global initialization scripts for their workspace.""" return self._global_init_scripts @property - def grants(self) -> service.catalog.GrantsAPI: + def grants(self) -> pkg_catalog.GrantsAPI: """In Unity Catalog, data is secure by default.""" return self._grants @property - def groups(self) -> service.iam.GroupsAPI: + def groups(self) -> pkg_iam.GroupsAPI: """Groups simplify identity management, making it easier to assign access to Databricks workspace, data, and other securable objects.""" return self._groups @property - def instance_pools(self) -> service.compute.InstancePoolsAPI: + def instance_pools(self) -> pkg_compute.InstancePoolsAPI: """Instance Pools API are used to create, edit, delete and list instance pools by using ready-to-use cloud instances which reduces a cluster start and auto-scaling times.""" return self._instance_pools @property - def instance_profiles(self) -> service.compute.InstanceProfilesAPI: + def instance_profiles(self) -> pkg_compute.InstanceProfilesAPI: """The Instance Profiles API allows admins to add, list, and remove instance profiles that users can launch clusters with.""" return self._instance_profiles @property - def ip_access_lists(self) -> service.settings.IpAccessListsAPI: + def ip_access_lists(self) -> pkg_settings.IpAccessListsAPI: """IP Access List enables admins to configure IP access lists.""" return self._ip_access_lists @@ -546,197 
+561,197 @@ def jobs(self) -> JobsExt: return self._jobs @property - def lakeview(self) -> service.dashboards.LakeviewAPI: + def lakeview(self) -> pkg_dashboards.LakeviewAPI: """These APIs provide specific management operations for Lakeview dashboards.""" return self._lakeview @property - def lakeview_embedded(self) -> service.dashboards.LakeviewEmbeddedAPI: + def lakeview_embedded(self) -> pkg_dashboards.LakeviewEmbeddedAPI: """Token-based Lakeview APIs for embedding dashboards in external applications.""" return self._lakeview_embedded @property - def libraries(self) -> service.compute.LibrariesAPI: + def libraries(self) -> pkg_compute.LibrariesAPI: """The Libraries API allows you to install and uninstall libraries and get the status of libraries on a cluster.""" return self._libraries @property - def metastores(self) -> service.catalog.MetastoresAPI: + def metastores(self) -> pkg_catalog.MetastoresAPI: """A metastore is the top-level container of objects in Unity Catalog.""" return self._metastores @property - def model_registry(self) -> service.ml.ModelRegistryAPI: + def model_registry(self) -> pkg_ml.ModelRegistryAPI: """Note: This API reference documents APIs for the Workspace Model Registry.""" return self._model_registry @property - def model_versions(self) -> service.catalog.ModelVersionsAPI: + def model_versions(self) -> pkg_catalog.ModelVersionsAPI: """Databricks provides a hosted version of MLflow Model Registry in Unity Catalog.""" return self._model_versions @property - def notification_destinations(self) -> service.settings.NotificationDestinationsAPI: + def notification_destinations(self) -> pkg_settings.NotificationDestinationsAPI: """The notification destinations API lets you programmatically manage a workspace's notification destinations.""" return self._notification_destinations @property - def online_tables(self) -> service.catalog.OnlineTablesAPI: + def online_tables(self) -> pkg_catalog.OnlineTablesAPI: """Online tables provide lower latency and higher QPS access to data from Delta tables.""" return self._online_tables @property - def permission_migration(self) -> service.iam.PermissionMigrationAPI: + def permission_migration(self) -> pkg_iam.PermissionMigrationAPI: """APIs for migrating acl permissions, used only by the ucx tool: https://github.com/databrickslabs/ucx.""" return self._permission_migration @property - def permissions(self) -> service.iam.PermissionsAPI: + def permissions(self) -> pkg_iam.PermissionsAPI: """Permissions API are used to create read, write, edit, update and manage access for various users on different objects and endpoints.""" return self._permissions @property - def pipelines(self) -> service.pipelines.PipelinesAPI: + def pipelines(self) -> pkg_pipelines.PipelinesAPI: """The Delta Live Tables API allows you to create, edit, delete, start, and view details about pipelines.""" return self._pipelines @property - def policy_compliance_for_clusters(self) -> service.compute.PolicyComplianceForClustersAPI: + def policy_compliance_for_clusters(self) -> pkg_compute.PolicyComplianceForClustersAPI: """The policy compliance APIs allow you to view and manage the policy compliance status of clusters in your workspace.""" return self._policy_compliance_for_clusters @property - def policy_compliance_for_jobs(self) -> service.jobs.PolicyComplianceForJobsAPI: + def policy_compliance_for_jobs(self) -> pkg_jobs.PolicyComplianceForJobsAPI: """The compliance APIs allow you to view and manage the policy compliance status of jobs in your workspace.""" return 
self._policy_compliance_for_jobs @property - def policy_families(self) -> service.compute.PolicyFamiliesAPI: + def policy_families(self) -> pkg_compute.PolicyFamiliesAPI: """View available policy families.""" return self._policy_families @property - def provider_exchange_filters(self) -> service.marketplace.ProviderExchangeFiltersAPI: + def provider_exchange_filters(self) -> pkg_marketplace.ProviderExchangeFiltersAPI: """Marketplace exchanges filters curate which groups can access an exchange.""" return self._provider_exchange_filters @property - def provider_exchanges(self) -> service.marketplace.ProviderExchangesAPI: + def provider_exchanges(self) -> pkg_marketplace.ProviderExchangesAPI: """Marketplace exchanges allow providers to share their listings with a curated set of customers.""" return self._provider_exchanges @property - def provider_files(self) -> service.marketplace.ProviderFilesAPI: + def provider_files(self) -> pkg_marketplace.ProviderFilesAPI: """Marketplace offers a set of file APIs for various purposes such as preview notebooks and provider icons.""" return self._provider_files @property - def provider_listings(self) -> service.marketplace.ProviderListingsAPI: + def provider_listings(self) -> pkg_marketplace.ProviderListingsAPI: """Listings are the core entities in the Marketplace.""" return self._provider_listings @property - def provider_personalization_requests(self) -> service.marketplace.ProviderPersonalizationRequestsAPI: + def provider_personalization_requests(self) -> pkg_marketplace.ProviderPersonalizationRequestsAPI: """Personalization requests are an alternate to instantly available listings.""" return self._provider_personalization_requests @property - def provider_provider_analytics_dashboards(self) -> service.marketplace.ProviderProviderAnalyticsDashboardsAPI: + def provider_provider_analytics_dashboards(self) -> pkg_marketplace.ProviderProviderAnalyticsDashboardsAPI: """Manage templated analytics solution for providers.""" return self._provider_provider_analytics_dashboards @property - def provider_providers(self) -> service.marketplace.ProviderProvidersAPI: + def provider_providers(self) -> pkg_marketplace.ProviderProvidersAPI: """Providers are entities that manage assets in Marketplace.""" return self._provider_providers @property - def providers(self) -> service.sharing.ProvidersAPI: + def providers(self) -> pkg_sharing.ProvidersAPI: """A data provider is an object representing the organization in the real world who shares the data.""" return self._providers @property - def quality_monitors(self) -> service.catalog.QualityMonitorsAPI: + def quality_monitors(self) -> pkg_catalog.QualityMonitorsAPI: """A monitor computes and monitors data or model quality metrics for a table over time.""" return self._quality_monitors @property - def queries(self) -> service.sql.QueriesAPI: + def queries(self) -> pkg_sql.QueriesAPI: """The queries API can be used to perform CRUD operations on queries.""" return self._queries @property - def queries_legacy(self) -> service.sql.QueriesLegacyAPI: + def queries_legacy(self) -> pkg_sql.QueriesLegacyAPI: """These endpoints are used for CRUD operations on query definitions.""" return self._queries_legacy @property - def query_execution(self) -> service.dashboards.QueryExecutionAPI: + def query_execution(self) -> pkg_dashboards.QueryExecutionAPI: """Query execution APIs for AI / BI Dashboards.""" return self._query_execution @property - def query_history(self) -> service.sql.QueryHistoryAPI: + def query_history(self) -> 
pkg_sql.QueryHistoryAPI: """A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute.""" return self._query_history @property - def query_visualizations(self) -> service.sql.QueryVisualizationsAPI: + def query_visualizations(self) -> pkg_sql.QueryVisualizationsAPI: """This is an evolving API that facilitates the addition and removal of visualizations from existing queries in the Databricks Workspace.""" return self._query_visualizations @property - def query_visualizations_legacy(self) -> service.sql.QueryVisualizationsLegacyAPI: + def query_visualizations_legacy(self) -> pkg_sql.QueryVisualizationsLegacyAPI: """This is an evolving API that facilitates the addition and removal of vizualisations from existing queries within the Databricks Workspace.""" return self._query_visualizations_legacy @property - def recipient_activation(self) -> service.sharing.RecipientActivationAPI: + def recipient_activation(self) -> pkg_sharing.RecipientActivationAPI: """The Recipient Activation API is only applicable in the open sharing model where the recipient object has the authentication type of `TOKEN`.""" return self._recipient_activation @property - def recipient_federation_policies(self) -> service.sharing.RecipientFederationPoliciesAPI: + def recipient_federation_policies(self) -> pkg_sharing.RecipientFederationPoliciesAPI: """The Recipient Federation Policies APIs are only applicable in the open sharing model where the recipient object has the authentication type of `OIDC_RECIPIENT`, enabling data sharing from Databricks to non-Databricks recipients.""" return self._recipient_federation_policies @property - def recipients(self) -> service.sharing.RecipientsAPI: + def recipients(self) -> pkg_sharing.RecipientsAPI: """A recipient is an object you create using :method:recipients/create to represent an organization which you want to allow access shares.""" return self._recipients @property - def redash_config(self) -> service.sql.RedashConfigAPI: + def redash_config(self) -> pkg_sql.RedashConfigAPI: """Redash V2 service for workspace configurations (internal).""" return self._redash_config @property - def registered_models(self) -> service.catalog.RegisteredModelsAPI: + def registered_models(self) -> pkg_catalog.RegisteredModelsAPI: """Databricks provides a hosted version of MLflow Model Registry in Unity Catalog.""" return self._registered_models @property - def repos(self) -> service.workspace.ReposAPI: + def repos(self) -> pkg_workspace.ReposAPI: """The Repos API allows users to manage their git repos.""" return self._repos @property - def resource_quotas(self) -> service.catalog.ResourceQuotasAPI: + def resource_quotas(self) -> pkg_catalog.ResourceQuotasAPI: """Unity Catalog enforces resource quotas on all securable objects, which limits the number of resources that can be created.""" return self._resource_quotas @property - def schemas(self) -> service.catalog.SchemasAPI: + def schemas(self) -> pkg_catalog.SchemasAPI: """A schema (also called a database) is the second layer of Unity Catalog’s three-level namespace.""" return self._schemas @property - def secrets(self) -> service.workspace.SecretsAPI: + def secrets(self) -> pkg_workspace.SecretsAPI: """The Secrets API allows you to manage secrets, secret scopes, and access permissions.""" return self._secrets @property - def service_principals(self) -> service.iam.ServicePrincipalsAPI: + def service_principals(self) -> pkg_iam.ServicePrincipalsAPI: """Identities for use with jobs, automated 
tools, and systems such as scripts, apps, and CI/CD platforms.""" return self._service_principals @@ -746,82 +761,82 @@ def serving_endpoints(self) -> ServingEndpointsExt: return self._serving_endpoints @property - def serving_endpoints_data_plane(self) -> service.serving.ServingEndpointsDataPlaneAPI: + def serving_endpoints_data_plane(self) -> pkg_serving.ServingEndpointsDataPlaneAPI: """Serving endpoints DataPlane provides a set of operations to interact with data plane endpoints for Serving endpoints service.""" return self._serving_endpoints_data_plane @property - def settings(self) -> service.settings.SettingsAPI: + def settings(self) -> pkg_settings.SettingsAPI: """Workspace Settings API allows users to manage settings at the workspace level.""" return self._settings @property - def shares(self) -> service.sharing.SharesAPI: + def shares(self) -> pkg_sharing.SharesAPI: """A share is a container instantiated with :method:shares/create.""" return self._shares @property - def statement_execution(self) -> service.sql.StatementExecutionAPI: + def statement_execution(self) -> pkg_sql.StatementExecutionAPI: """The Databricks SQL Statement Execution API can be used to execute SQL statements on a SQL warehouse and fetch the result.""" return self._statement_execution @property - def storage_credentials(self) -> service.catalog.StorageCredentialsAPI: + def storage_credentials(self) -> pkg_catalog.StorageCredentialsAPI: """A storage credential represents an authentication and authorization mechanism for accessing data stored on your cloud tenant.""" return self._storage_credentials @property - def system_schemas(self) -> service.catalog.SystemSchemasAPI: + def system_schemas(self) -> pkg_catalog.SystemSchemasAPI: """A system schema is a schema that lives within the system catalog.""" return self._system_schemas @property - def table_constraints(self) -> service.catalog.TableConstraintsAPI: + def table_constraints(self) -> pkg_catalog.TableConstraintsAPI: """Primary key and foreign key constraints encode relationships between fields in tables.""" return self._table_constraints @property - def tables(self) -> service.catalog.TablesAPI: + def tables(self) -> pkg_catalog.TablesAPI: """A table resides in the third layer of Unity Catalog’s three-level namespace.""" return self._tables @property - def temporary_table_credentials(self) -> service.catalog.TemporaryTableCredentialsAPI: + def temporary_table_credentials(self) -> pkg_catalog.TemporaryTableCredentialsAPI: """Temporary Table Credentials refer to short-lived, downscoped credentials used to access cloud storage locationswhere table data is stored in Databricks.""" return self._temporary_table_credentials @property - def token_management(self) -> service.settings.TokenManagementAPI: + def token_management(self) -> pkg_settings.TokenManagementAPI: """Enables administrators to get all tokens and delete tokens for other users.""" return self._token_management @property - def tokens(self) -> service.settings.TokensAPI: + def tokens(self) -> pkg_settings.TokensAPI: """The Token API allows you to create, list, and revoke tokens that can be used to authenticate and access Databricks REST APIs.""" return self._tokens @property - def users(self) -> service.iam.UsersAPI: + def users(self) -> pkg_iam.UsersAPI: """User identities recognized by Databricks and represented by email addresses.""" return self._users @property - def vector_search_endpoints(self) -> service.vectorsearch.VectorSearchEndpointsAPI: + def vector_search_endpoints(self) -> 
pkg_vectorsearch.VectorSearchEndpointsAPI: """**Endpoint**: Represents the compute resources to host vector search indexes.""" return self._vector_search_endpoints @property - def vector_search_indexes(self) -> service.vectorsearch.VectorSearchIndexesAPI: + def vector_search_indexes(self) -> pkg_vectorsearch.VectorSearchIndexesAPI: """**Index**: An efficient representation of your embedding vectors that supports real-time and efficient approximate nearest neighbor (ANN) search queries.""" return self._vector_search_indexes @property - def volumes(self) -> service.catalog.VolumesAPI: + def volumes(self) -> pkg_catalog.VolumesAPI: """Volumes are a Unity Catalog (UC) capability for accessing, storing, governing, organizing and processing files.""" return self._volumes @property - def warehouses(self) -> service.sql.WarehousesAPI: + def warehouses(self) -> pkg_sql.WarehousesAPI: """A SQL warehouse is a compute resource that lets you run SQL commands on data objects within Databricks SQL.""" return self._warehouses @@ -831,17 +846,17 @@ def workspace(self) -> WorkspaceExt: return self._workspace @property - def workspace_bindings(self) -> service.catalog.WorkspaceBindingsAPI: + def workspace_bindings(self) -> pkg_catalog.WorkspaceBindingsAPI: """A securable in Databricks can be configured as __OPEN__ or __ISOLATED__.""" return self._workspace_bindings @property - def workspace_conf(self) -> service.settings.WorkspaceConfAPI: + def workspace_conf(self) -> pkg_settings.WorkspaceConfAPI: """This API allows updating known workspace settings for advanced users.""" return self._workspace_conf @property - def forecasting(self) -> service.ml.ForecastingAPI: + def forecasting(self) -> pkg_ml.ForecastingAPI: """The Forecasting API allows you to create and get serverless forecasting experiments.""" return self._forecasting @@ -919,37 +934,37 @@ def __init__( ) self._config = config.copy() self._api_client = client.ApiClient(self._config) - self._access_control = service.iam.AccountAccessControlAPI(self._api_client) - self._billable_usage = service.billing.BillableUsageAPI(self._api_client) - self._budget_policy = service.billing.BudgetPolicyAPI(self._api_client) - self._credentials = service.provisioning.CredentialsAPI(self._api_client) - self._custom_app_integration = service.oauth2.CustomAppIntegrationAPI(self._api_client) - self._encryption_keys = service.provisioning.EncryptionKeysAPI(self._api_client) - self._federation_policy = service.oauth2.AccountFederationPolicyAPI(self._api_client) - self._groups = service.iam.AccountGroupsAPI(self._api_client) - self._ip_access_lists = service.settings.AccountIpAccessListsAPI(self._api_client) - self._log_delivery = service.billing.LogDeliveryAPI(self._api_client) - self._metastore_assignments = service.catalog.AccountMetastoreAssignmentsAPI(self._api_client) - self._metastores = service.catalog.AccountMetastoresAPI(self._api_client) - self._network_connectivity = service.settings.NetworkConnectivityAPI(self._api_client) - self._network_policies = service.settings.NetworkPoliciesAPI(self._api_client) - self._networks = service.provisioning.NetworksAPI(self._api_client) - self._o_auth_published_apps = service.oauth2.OAuthPublishedAppsAPI(self._api_client) - self._private_access = service.provisioning.PrivateAccessAPI(self._api_client) - self._published_app_integration = service.oauth2.PublishedAppIntegrationAPI(self._api_client) - self._service_principal_federation_policy = service.oauth2.ServicePrincipalFederationPolicyAPI(self._api_client) - 
self._service_principal_secrets = service.oauth2.ServicePrincipalSecretsAPI(self._api_client) - self._service_principals = service.iam.AccountServicePrincipalsAPI(self._api_client) - self._settings = service.settings.AccountSettingsAPI(self._api_client) - self._storage = service.provisioning.StorageAPI(self._api_client) - self._storage_credentials = service.catalog.AccountStorageCredentialsAPI(self._api_client) - self._usage_dashboards = service.billing.UsageDashboardsAPI(self._api_client) - self._users = service.iam.AccountUsersAPI(self._api_client) - self._vpc_endpoints = service.provisioning.VpcEndpointsAPI(self._api_client) - self._workspace_assignment = service.iam.WorkspaceAssignmentAPI(self._api_client) - self._workspace_network_configuration = service.settings.WorkspaceNetworkConfigurationAPI(self._api_client) - self._workspaces = service.provisioning.WorkspacesAPI(self._api_client) - self._budgets = service.billing.BudgetsAPI(self._api_client) + self._access_control = pkg_iam.AccountAccessControlAPI(self._api_client) + self._billable_usage = pkg_billing.BillableUsageAPI(self._api_client) + self._budget_policy = pkg_billing.BudgetPolicyAPI(self._api_client) + self._credentials = pkg_provisioning.CredentialsAPI(self._api_client) + self._custom_app_integration = pkg_oauth2.CustomAppIntegrationAPI(self._api_client) + self._encryption_keys = pkg_provisioning.EncryptionKeysAPI(self._api_client) + self._federation_policy = pkg_oauth2.AccountFederationPolicyAPI(self._api_client) + self._groups = pkg_iam.AccountGroupsAPI(self._api_client) + self._ip_access_lists = pkg_settings.AccountIpAccessListsAPI(self._api_client) + self._log_delivery = pkg_billing.LogDeliveryAPI(self._api_client) + self._metastore_assignments = pkg_catalog.AccountMetastoreAssignmentsAPI(self._api_client) + self._metastores = pkg_catalog.AccountMetastoresAPI(self._api_client) + self._network_connectivity = pkg_settings.NetworkConnectivityAPI(self._api_client) + self._network_policies = pkg_settings.NetworkPoliciesAPI(self._api_client) + self._networks = pkg_provisioning.NetworksAPI(self._api_client) + self._o_auth_published_apps = pkg_oauth2.OAuthPublishedAppsAPI(self._api_client) + self._private_access = pkg_provisioning.PrivateAccessAPI(self._api_client) + self._published_app_integration = pkg_oauth2.PublishedAppIntegrationAPI(self._api_client) + self._service_principal_federation_policy = pkg_oauth2.ServicePrincipalFederationPolicyAPI(self._api_client) + self._service_principal_secrets = pkg_oauth2.ServicePrincipalSecretsAPI(self._api_client) + self._service_principals = pkg_iam.AccountServicePrincipalsAPI(self._api_client) + self._settings = pkg_settings.AccountSettingsAPI(self._api_client) + self._storage = pkg_provisioning.StorageAPI(self._api_client) + self._storage_credentials = pkg_catalog.AccountStorageCredentialsAPI(self._api_client) + self._usage_dashboards = pkg_billing.UsageDashboardsAPI(self._api_client) + self._users = pkg_iam.AccountUsersAPI(self._api_client) + self._vpc_endpoints = pkg_provisioning.VpcEndpointsAPI(self._api_client) + self._workspace_assignment = pkg_iam.WorkspaceAssignmentAPI(self._api_client) + self._workspace_network_configuration = pkg_settings.WorkspaceNetworkConfigurationAPI(self._api_client) + self._workspaces = pkg_provisioning.WorkspacesAPI(self._api_client) + self._budgets = pkg_billing.BudgetsAPI(self._api_client) @property def config(self) -> client.Config: @@ -960,157 +975,157 @@ def api_client(self) -> client.ApiClient: return self._api_client @property - def 
access_control(self) -> service.iam.AccountAccessControlAPI: + def access_control(self) -> pkg_iam.AccountAccessControlAPI: """These APIs manage access rules on resources in an account.""" return self._access_control @property - def billable_usage(self) -> service.billing.BillableUsageAPI: + def billable_usage(self) -> pkg_billing.BillableUsageAPI: """This API allows you to download billable usage logs for the specified account and date range.""" return self._billable_usage @property - def budget_policy(self) -> service.billing.BudgetPolicyAPI: + def budget_policy(self) -> pkg_billing.BudgetPolicyAPI: """A service serves REST API about Budget policies.""" return self._budget_policy @property - def credentials(self) -> service.provisioning.CredentialsAPI: + def credentials(self) -> pkg_provisioning.CredentialsAPI: """These APIs manage credential configurations for this workspace.""" return self._credentials @property - def custom_app_integration(self) -> service.oauth2.CustomAppIntegrationAPI: + def custom_app_integration(self) -> pkg_oauth2.CustomAppIntegrationAPI: """These APIs enable administrators to manage custom OAuth app integrations, which is required for adding/using Custom OAuth App Integration like Tableau Cloud for Databricks in AWS cloud.""" return self._custom_app_integration @property - def encryption_keys(self) -> service.provisioning.EncryptionKeysAPI: + def encryption_keys(self) -> pkg_provisioning.EncryptionKeysAPI: """These APIs manage encryption key configurations for this workspace (optional).""" return self._encryption_keys @property - def federation_policy(self) -> service.oauth2.AccountFederationPolicyAPI: + def federation_policy(self) -> pkg_oauth2.AccountFederationPolicyAPI: """These APIs manage account federation policies.""" return self._federation_policy @property - def groups(self) -> service.iam.AccountGroupsAPI: + def groups(self) -> pkg_iam.AccountGroupsAPI: """Groups simplify identity management, making it easier to assign access to Databricks account, data, and other securable objects.""" return self._groups @property - def ip_access_lists(self) -> service.settings.AccountIpAccessListsAPI: + def ip_access_lists(self) -> pkg_settings.AccountIpAccessListsAPI: """The Accounts IP Access List API enables account admins to configure IP access lists for access to the account console.""" return self._ip_access_lists @property - def log_delivery(self) -> service.billing.LogDeliveryAPI: + def log_delivery(self) -> pkg_billing.LogDeliveryAPI: """These APIs manage log delivery configurations for this account.""" return self._log_delivery @property - def metastore_assignments(self) -> service.catalog.AccountMetastoreAssignmentsAPI: + def metastore_assignments(self) -> pkg_catalog.AccountMetastoreAssignmentsAPI: """These APIs manage metastore assignments to a workspace.""" return self._metastore_assignments @property - def metastores(self) -> service.catalog.AccountMetastoresAPI: + def metastores(self) -> pkg_catalog.AccountMetastoresAPI: """These APIs manage Unity Catalog metastores for an account.""" return self._metastores @property - def network_connectivity(self) -> service.settings.NetworkConnectivityAPI: + def network_connectivity(self) -> pkg_settings.NetworkConnectivityAPI: """These APIs provide configurations for the network connectivity of your workspaces for serverless compute resources.""" return self._network_connectivity @property - def network_policies(self) -> service.settings.NetworkPoliciesAPI: + def network_policies(self) -> 
pkg_settings.NetworkPoliciesAPI: """These APIs manage network policies for this account.""" return self._network_policies @property - def networks(self) -> service.provisioning.NetworksAPI: + def networks(self) -> pkg_provisioning.NetworksAPI: """These APIs manage network configurations for customer-managed VPCs (optional).""" return self._networks @property - def o_auth_published_apps(self) -> service.oauth2.OAuthPublishedAppsAPI: + def o_auth_published_apps(self) -> pkg_oauth2.OAuthPublishedAppsAPI: """These APIs enable administrators to view all the available published OAuth applications in Databricks.""" return self._o_auth_published_apps @property - def private_access(self) -> service.provisioning.PrivateAccessAPI: + def private_access(self) -> pkg_provisioning.PrivateAccessAPI: """These APIs manage private access settings for this account.""" return self._private_access @property - def published_app_integration(self) -> service.oauth2.PublishedAppIntegrationAPI: + def published_app_integration(self) -> pkg_oauth2.PublishedAppIntegrationAPI: """These APIs enable administrators to manage published OAuth app integrations, which is required for adding/using Published OAuth App Integration like Tableau Desktop for Databricks in AWS cloud.""" return self._published_app_integration @property - def service_principal_federation_policy(self) -> service.oauth2.ServicePrincipalFederationPolicyAPI: + def service_principal_federation_policy(self) -> pkg_oauth2.ServicePrincipalFederationPolicyAPI: """These APIs manage service principal federation policies.""" return self._service_principal_federation_policy @property - def service_principal_secrets(self) -> service.oauth2.ServicePrincipalSecretsAPI: + def service_principal_secrets(self) -> pkg_oauth2.ServicePrincipalSecretsAPI: """These APIs enable administrators to manage service principal secrets.""" return self._service_principal_secrets @property - def service_principals(self) -> service.iam.AccountServicePrincipalsAPI: + def service_principals(self) -> pkg_iam.AccountServicePrincipalsAPI: """Identities for use with jobs, automated tools, and systems such as scripts, apps, and CI/CD platforms.""" return self._service_principals @property - def settings(self) -> service.settings.AccountSettingsAPI: + def settings(self) -> pkg_settings.AccountSettingsAPI: """Accounts Settings API allows users to manage settings at the account level.""" return self._settings @property - def storage(self) -> service.provisioning.StorageAPI: + def storage(self) -> pkg_provisioning.StorageAPI: """These APIs manage storage configurations for this workspace.""" return self._storage @property - def storage_credentials(self) -> service.catalog.AccountStorageCredentialsAPI: + def storage_credentials(self) -> pkg_catalog.AccountStorageCredentialsAPI: """These APIs manage storage credentials for a particular metastore.""" return self._storage_credentials @property - def usage_dashboards(self) -> service.billing.UsageDashboardsAPI: + def usage_dashboards(self) -> pkg_billing.UsageDashboardsAPI: """These APIs manage usage dashboards for this account.""" return self._usage_dashboards @property - def users(self) -> service.iam.AccountUsersAPI: + def users(self) -> pkg_iam.AccountUsersAPI: """User identities recognized by Databricks and represented by email addresses.""" return self._users @property - def vpc_endpoints(self) -> service.provisioning.VpcEndpointsAPI: + def vpc_endpoints(self) -> pkg_provisioning.VpcEndpointsAPI: """These APIs manage VPC endpoint configurations for 
this account.""" return self._vpc_endpoints @property - def workspace_assignment(self) -> service.iam.WorkspaceAssignmentAPI: + def workspace_assignment(self) -> pkg_iam.WorkspaceAssignmentAPI: """The Workspace Permission Assignment API allows you to manage workspace permissions for principals in your account.""" return self._workspace_assignment @property - def workspace_network_configuration(self) -> service.settings.WorkspaceNetworkConfigurationAPI: + def workspace_network_configuration(self) -> pkg_settings.WorkspaceNetworkConfigurationAPI: """These APIs allow configuration of network settings for Databricks workspaces.""" return self._workspace_network_configuration @property - def workspaces(self) -> service.provisioning.WorkspacesAPI: + def workspaces(self) -> pkg_provisioning.WorkspacesAPI: """These APIs manage workspaces for this account.""" return self._workspaces @property - def budgets(self) -> service.billing.BudgetsAPI: + def budgets(self) -> pkg_billing.BudgetsAPI: """These APIs manage budget configurations for this account.""" return self._budgets diff --git a/docs/account/iam/access_control.rst b/docs/account/iam/access_control.rst index 4bf03b75e..475d28c07 100644 --- a/docs/account/iam/access_control.rst +++ b/docs/account/iam/access_control.rst @@ -18,6 +18,11 @@ :param resource: str The resource name for which assignable roles will be listed. + Examples | Summary :--- | :--- `resource=accounts/` | A resource name for the account. + `resource=accounts//groups/` | A resource name for the group. + `resource=accounts//servicePrincipals/` | A resource name for the service + principal. + :returns: :class:`GetAssignableRolesForResourceResponse` @@ -30,6 +35,12 @@ :param name: str The ruleset name associated with the request. + + Examples | Summary :--- | :--- `name=accounts//ruleSets/default` | A name for a rule set + on the account. `name=accounts//groups//ruleSets/default` | A name for a rule + set on the group. + `name=accounts//servicePrincipals//ruleSets/default` | + A name for a rule set on the service principal. :param etag: str Etag used for versioning. The response is at least as fresh as the eTag provided. Etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a rule set from @@ -38,6 +49,10 @@ etag from a GET rule set request, and pass it with the PUT update request to identify the rule set version you are updating. + Examples | Summary :--- | :--- `etag=` | An empty etag can only be used in GET to indicate no + freshness requirements. `etag=RENUAAABhSweA4NvVmmUYdiU717H3Tgy0UJdor3gE4a+mq/oj9NjAf8ZsQ==` | An + etag encoded a specific version of the rule set to get or to be updated. + :returns: :class:`RuleSetResponse` diff --git a/docs/account/iam/workspace_assignment.rst b/docs/account/iam/workspace_assignment.rst index 214bad364..33df20178 100644 --- a/docs/account/iam/workspace_assignment.rst +++ b/docs/account/iam/workspace_assignment.rst @@ -47,9 +47,9 @@ a = AccountClient() - workspace_id = os.environ["TEST_WORKSPACE_ID"] + workspace_id = os.environ["DUMMY_WORKSPACE_ID"] - all = a.workspace_assignment.list(list=workspace_id) + all = a.workspace_assignment.list(workspace_id=workspace_id) Get permission assignments. 
@@ -80,9 +80,9 @@ spn_id = spn.id - workspace_id = os.environ["TEST_WORKSPACE_ID"] + workspace_id = os.environ["DUMMY_WORKSPACE_ID"] - a.workspace_assignment.update( + _ = a.workspace_assignment.update( workspace_id=workspace_id, principal_id=spn_id, permissions=[iam.WorkspacePermission.USER], diff --git a/docs/account/provisioning/storage.rst b/docs/account/provisioning/storage.rst index 7b85e3be0..b8e144f8c 100644 --- a/docs/account/provisioning/storage.rst +++ b/docs/account/provisioning/storage.rst @@ -16,6 +16,7 @@ .. code-block:: + import os import time from databricks.sdk import AccountClient @@ -25,8 +26,11 @@ storage = a.storage.create( storage_configuration_name=f"sdk-{time.time_ns()}", - root_bucket_info=provisioning.RootBucketInfo(bucket_name=f"sdk-{time.time_ns()}"), + root_bucket_info=provisioning.RootBucketInfo(bucket_name=os.environ["TEST_ROOT_BUCKET"]), ) + + # cleanup + a.storage.delete(storage_configuration_id=storage.storage_configuration_id) Create new storage configuration. diff --git a/docs/account/settings/index.rst b/docs/account/settings/index.rst index 9ffe7694e..80913733c 100644 --- a/docs/account/settings/index.rst +++ b/docs/account/settings/index.rst @@ -9,9 +9,13 @@ Manage security settings for Accounts and Workspaces ip_access_lists network_connectivity + network_policies settings csp_enablement_account disable_legacy_features enable_ip_access_lists esm_enablement_account - personal_compute \ No newline at end of file + llm_proxy_partner_powered_account + llm_proxy_partner_powered_enforce + personal_compute + workspace_network_configuration \ No newline at end of file diff --git a/docs/account/settings/llm_proxy_partner_powered_account.rst b/docs/account/settings/llm_proxy_partner_powered_account.rst new file mode 100644 index 000000000..fe5a55183 --- /dev/null +++ b/docs/account/settings/llm_proxy_partner_powered_account.rst @@ -0,0 +1,46 @@ +``a.settings.llm_proxy_partner_powered_account``: Enable Partner Powered AI Features for Account +================================================================================================ +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: LlmProxyPartnerPoweredAccountAPI + + Determines if partner powered models are enabled or not for a specific account + + .. py:method:: get( [, etag: Optional[str]]) -> LlmProxyPartnerPoweredAccount + + Get the enable partner powered AI features account setting. + + Gets the enable partner powered AI features account setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`LlmProxyPartnerPoweredAccount` + + + .. py:method:: update(allow_missing: bool, setting: LlmProxyPartnerPoweredAccount, field_mask: str) -> LlmProxyPartnerPoweredAccount + + Update the enable partner powered AI features account setting. + + Updates the enable partner powered AI features account setting. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. 
+ :param setting: :class:`LlmProxyPartnerPoweredAccount` + :param field_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`LlmProxyPartnerPoweredAccount` + \ No newline at end of file diff --git a/docs/account/settings/llm_proxy_partner_powered_enforce.rst b/docs/account/settings/llm_proxy_partner_powered_enforce.rst new file mode 100644 index 000000000..084b744e0 --- /dev/null +++ b/docs/account/settings/llm_proxy_partner_powered_enforce.rst @@ -0,0 +1,47 @@ +``a.settings.llm_proxy_partner_powered_enforce``: Enable Enforcement of Partner Powered AI Features +=================================================================================================== +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: LlmProxyPartnerPoweredEnforceAPI + + Determines if the account-level partner-powered setting value is enforced upon the workspace-level + partner-powered setting + + .. py:method:: get( [, etag: Optional[str]]) -> LlmProxyPartnerPoweredEnforce + + Get the enforcement status of partner powered AI features account setting. + + Gets the enforcement status of partner powered AI features account setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`LlmProxyPartnerPoweredEnforce` + + + .. py:method:: update(allow_missing: bool, setting: LlmProxyPartnerPoweredEnforce, field_mask: str) -> LlmProxyPartnerPoweredEnforce + + Update the enforcement status of partner powered AI features account setting. + + Updates the enable enforcement status of partner powered AI features account setting. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`LlmProxyPartnerPoweredEnforce` + :param field_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. 
+ + :returns: :class:`LlmProxyPartnerPoweredEnforce` + \ No newline at end of file diff --git a/docs/account/settings/network_policies.rst b/docs/account/settings/network_policies.rst new file mode 100644 index 000000000..7eb489bb8 --- /dev/null +++ b/docs/account/settings/network_policies.rst @@ -0,0 +1,73 @@ +``a.network_policies``: Network Policies +======================================== +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: NetworkPoliciesAPI + + These APIs manage network policies for this account. Network policies control which network destinations + can be accessed from the Databricks environment. Each Databricks account includes a default policy named + 'default-policy'. 'default-policy' is associated with any workspace lacking an explicit network policy + assignment, and is automatically associated with each newly created workspace. 'default-policy' is + reserved and cannot be deleted, but it can be updated to customize the default network access rules for + your account. + + .. py:method:: create_network_policy_rpc(network_policy: AccountNetworkPolicy) -> AccountNetworkPolicy + + Create a network policy. + + Creates a new network policy to manage which network destinations can be accessed from the Databricks + environment. + + :param network_policy: :class:`AccountNetworkPolicy` + + :returns: :class:`AccountNetworkPolicy` + + + .. py:method:: delete_network_policy_rpc(network_policy_id: str) + + Delete a network policy. + + Deletes a network policy. Cannot be called on 'default-policy'. + + :param network_policy_id: str + The unique identifier of the network policy to delete. + + + + + .. py:method:: get_network_policy_rpc(network_policy_id: str) -> AccountNetworkPolicy + + Get a network policy. + + Gets a network policy. + + :param network_policy_id: str + The unique identifier of the network policy to retrieve. + + :returns: :class:`AccountNetworkPolicy` + + + .. py:method:: list_network_policies_rpc( [, page_token: Optional[str]]) -> Iterator[AccountNetworkPolicy] + + List network policies. + + Gets an array of network policies. + + :param page_token: str (optional) + Pagination token to go to next page based on previous query. + + :returns: Iterator over :class:`AccountNetworkPolicy` + + + .. py:method:: update_network_policy_rpc(network_policy_id: str, network_policy: AccountNetworkPolicy) -> AccountNetworkPolicy + + Update a network policy. + + Updates a network policy. This allows you to modify the configuration of a network policy. + + :param network_policy_id: str + The unique identifier for the network policy. + :param network_policy: :class:`AccountNetworkPolicy` + + :returns: :class:`AccountNetworkPolicy` + \ No newline at end of file diff --git a/docs/account/settings/settings.rst b/docs/account/settings/settings.rst index 19802700d..e96e06a7c 100644 --- a/docs/account/settings/settings.rst +++ b/docs/account/settings/settings.rst @@ -38,6 +38,17 @@ new workspaces. By default, this account-level setting is disabled for new workspaces. After workspace creation, account admins can enable enhanced security monitoring individually for each workspace. + .. py:property:: llm_proxy_partner_powered_account + :type: LlmProxyPartnerPoweredAccountAPI + + Determines if partner powered models are enabled or not for a specific account + + .. 
py:property:: llm_proxy_partner_powered_enforce + :type: LlmProxyPartnerPoweredEnforceAPI + + Determines if the account-level partner-powered setting value is enforced upon the workspace-level + partner-powered setting + .. py:property:: personal_compute :type: PersonalComputeAPI diff --git a/docs/account/settings/workspace_network_configuration.rst b/docs/account/settings/workspace_network_configuration.rst new file mode 100644 index 000000000..98ff84202 --- /dev/null +++ b/docs/account/settings/workspace_network_configuration.rst @@ -0,0 +1,39 @@ +``a.workspace_network_configuration``: Workspace Network Configuration +====================================================================== +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: WorkspaceNetworkConfigurationAPI + + These APIs allow configuration of network settings for Databricks workspaces. Each workspace is always + associated with exactly one network policy that controls which network destinations can be accessed from + the Databricks environment. By default, workspaces are associated with the 'default-policy' network + policy. You cannot create or delete a workspace's network configuration, only update it to associate the + workspace with a different policy. + + .. py:method:: get_workspace_network_option_rpc(workspace_id: int) -> WorkspaceNetworkOption + + Get workspace network configuration. + + Gets the network configuration for a workspace. Every workspace has exactly one network policy + binding, with 'default-policy' used if no explicit assignment exists. + + :param workspace_id: int + The workspace ID. + + :returns: :class:`WorkspaceNetworkOption` + + + .. py:method:: update_workspace_network_option_rpc(workspace_id: int, workspace_network_option: WorkspaceNetworkOption) -> WorkspaceNetworkOption + + Update workspace network configuration. + + Updates the network configuration for a workspace. This operation associates the workspace with the + specified network policy. To revert to the default policy, specify 'default-policy' as the + network_policy_id. + + :param workspace_id: int + The workspace ID. + :param workspace_network_option: :class:`WorkspaceNetworkOption` + + :returns: :class:`WorkspaceNetworkOption` + \ No newline at end of file diff --git a/docs/dbdataclasses/apps.rst b/docs/dbdataclasses/apps.rst index 2214e2ac9..bbd625c62 100644 --- a/docs/dbdataclasses/apps.rst +++ b/docs/dbdataclasses/apps.rst @@ -145,6 +145,23 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: IS_OWNER :value: "IS_OWNER" +.. autoclass:: AppResourceUcSecurable + :members: + :undoc-members: + +.. py:class:: AppResourceUcSecurableUcSecurablePermission + + .. py:attribute:: READ_VOLUME + :value: "READ_VOLUME" + + .. py:attribute:: WRITE_VOLUME + :value: "WRITE_VOLUME" + +.. py:class:: AppResourceUcSecurableUcSecurableType + + .. py:attribute:: VOLUME + :value: "VOLUME" + .. py:class:: ApplicationState .. py:attribute:: CRASHED diff --git a/docs/dbdataclasses/catalog.rst b/docs/dbdataclasses/catalog.rst index 98873f7fa..efbb6d06c 100644 --- a/docs/dbdataclasses/catalog.rst +++ b/docs/dbdataclasses/catalog.rst @@ -81,6 +81,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AwsSqsQueue + :members: + :undoc-members: + .. 
autoclass:: AzureActiveDirectoryToken :members: :undoc-members: @@ -97,6 +101,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AzureQueueStorage + :members: + :undoc-members: + .. autoclass:: AzureServicePrincipal :members: :undoc-members: @@ -115,8 +123,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: CatalogIsolationMode - Whether the current securable is accessible from all workspaces or a specific set of workspaces. - .. py:attribute:: ISOLATED :value: "ISOLATED" @@ -133,12 +139,21 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: FOREIGN_CATALOG :value: "FOREIGN_CATALOG" + .. py:attribute:: INTERNAL_CATALOG + :value: "INTERNAL_CATALOG" + .. py:attribute:: MANAGED_CATALOG :value: "MANAGED_CATALOG" + .. py:attribute:: MANAGED_ONLINE_CATALOG + :value: "MANAGED_ONLINE_CATALOG" + .. py:attribute:: SYSTEM_CATALOG :value: "SYSTEM_CATALOG" + .. py:attribute:: UNKNOWN_CATALOG_TYPE + :value: "UNKNOWN_CATALOG_TYPE" + .. autoclass:: CloudflareApiToken :members: :undoc-members: @@ -399,10 +414,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CurrentWorkspaceBindings - :members: - :undoc-members: - .. py:class:: DataSourceFormat Data source format @@ -476,6 +487,34 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: WORKDAY_RAAS_FORMAT :value: "WORKDAY_RAAS_FORMAT" +.. autoclass:: DatabaseCatalog + :members: + :undoc-members: + +.. autoclass:: DatabaseInstance + :members: + :undoc-members: + +.. py:class:: DatabaseInstanceState + + .. py:attribute:: AVAILABLE + :value: "AVAILABLE" + + .. py:attribute:: DELETING + :value: "DELETING" + + .. py:attribute:: FAILING_OVER + :value: "FAILING_OVER" + + .. py:attribute:: STARTING + :value: "STARTING" + + .. py:attribute:: STOPPED + :value: "STOPPED" + + .. py:attribute:: UPDATING + :value: "UPDATING" + .. autoclass:: DatabricksGcpServiceAccount :members: :undoc-members: @@ -496,10 +535,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteDatabaseCatalogResponse + :members: + :undoc-members: + +.. autoclass:: DeleteDatabaseInstanceResponse + :members: + :undoc-members: + .. autoclass:: DeleteResponse :members: :undoc-members: +.. autoclass:: DeleteSyncedDatabaseTableResponse + :members: + :undoc-members: + .. autoclass:: DeltaRuntimePropertiesKvPairs :members: :undoc-members: @@ -544,8 +595,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: EnablePredictiveOptimization - Whether predictive optimization should be enabled for this object and objects under it. - .. py:attribute:: DISABLE :value: "DISABLE" @@ -555,6 +604,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INHERIT :value: "INHERIT" +.. autoclass:: EnableRequest + :members: + :undoc-members: + .. autoclass:: EnableResponse :members: :undoc-members: @@ -571,6 +624,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: FileEventQueue + :members: + :undoc-members: + .. 
autoclass:: ForeignKeyConstraint :members: :undoc-members: @@ -649,6 +706,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GcpPubsub + :members: + :undoc-members: + .. autoclass:: GenerateTemporaryServiceCredentialAzureOptions :members: :undoc-members: @@ -669,19 +730,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: GetBindingsSecurableType - - .. py:attribute:: CATALOG - :value: "CATALOG" - - .. py:attribute:: CREDENTIAL - :value: "CREDENTIAL" - - .. py:attribute:: EXTERNAL_LOCATION - :value: "EXTERNAL_LOCATION" - - .. py:attribute:: STORAGE_CREDENTIAL - :value: "STORAGE_CREDENTIAL" +.. autoclass:: GetCatalogWorkspaceBindingsResponse + :members: + :undoc-members: .. autoclass:: GetMetastoreSummaryResponse :members: @@ -701,6 +752,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GetWorkspaceBindingsResponse + :members: + :undoc-members: + .. py:class:: IsolationMode .. py:attribute:: ISOLATION_MODE_ISOLATED @@ -729,6 +784,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListDatabaseInstancesResponse + :members: + :undoc-members: + .. autoclass:: ListExternalLocationsResponse :members: :undoc-members: @@ -948,6 +1007,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: NewPipelineSpec + :members: + :undoc-members: + .. autoclass:: OnlineTable :members: :undoc-members: @@ -1232,7 +1295,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SecurableType - The type of Unity Catalog securable + The type of Unity Catalog securable. .. py:attribute:: CATALOG :value: "CATALOG" @@ -1249,6 +1312,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: EXTERNAL_LOCATION :value: "EXTERNAL_LOCATION" + .. py:attribute:: EXTERNAL_METADATA + :value: "EXTERNAL_METADATA" + .. py:attribute:: FUNCTION :value: "FUNCTION" @@ -1270,12 +1336,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SHARE :value: "SHARE" + .. py:attribute:: STAGING_TABLE + :value: "STAGING_TABLE" + .. py:attribute:: STORAGE_CREDENTIAL :value: "STORAGE_CREDENTIAL" .. py:attribute:: TABLE :value: "TABLE" + .. py:attribute:: UNKNOWN_SECURABLE_TYPE + :value: "UNKNOWN_SECURABLE_TYPE" + .. py:attribute:: VOLUME :value: "VOLUME" @@ -1293,8 +1365,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: SseEncryptionDetailsAlgorithm - The type of key encryption to use (affects headers from s3 client). - .. py:attribute:: AWS_SSE_KMS :value: "AWS_SSE_KMS" @@ -1305,28 +1375,28 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: SystemSchemaInfo +.. autoclass:: SyncedDatabaseTable :members: :undoc-members: -.. py:class:: SystemSchemaInfoState +.. py:class:: SyncedTableSchedulingPolicy - The current state of enablement for the system schema. An empty string means the system schema is available and ready for opt-in. + .. py:attribute:: CONTINUOUS + :value: "CONTINUOUS" - .. py:attribute:: AVAILABLE - :value: "AVAILABLE" - - .. py:attribute:: DISABLE_INITIALIZED - :value: "DISABLE_INITIALIZED" + .. 
py:attribute:: SNAPSHOT + :value: "SNAPSHOT" - .. py:attribute:: ENABLE_COMPLETED - :value: "ENABLE_COMPLETED" + .. py:attribute:: TRIGGERED + :value: "TRIGGERED" - .. py:attribute:: ENABLE_INITIALIZED - :value: "ENABLE_INITIALIZED" +.. autoclass:: SyncedTableSpec + :members: + :undoc-members: - .. py:attribute:: UNAVAILABLE - :value: "UNAVAILABLE" +.. autoclass:: SystemSchemaInfo + :members: + :undoc-members: .. autoclass:: TableConstraint :members: @@ -1406,24 +1476,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. py:class:: UpdateBindingsSecurableType - - .. py:attribute:: CATALOG - :value: "CATALOG" - - .. py:attribute:: CREDENTIAL - :value: "CREDENTIAL" - - .. py:attribute:: EXTERNAL_LOCATION - :value: "EXTERNAL_LOCATION" - - .. py:attribute:: STORAGE_CREDENTIAL - :value: "STORAGE_CREDENTIAL" - .. autoclass:: UpdateCatalog :members: :undoc-members: +.. autoclass:: UpdateCatalogWorkspaceBindingsResponse + :members: + :undoc-members: + .. autoclass:: UpdateConnection :members: :undoc-members: @@ -1498,6 +1558,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateWorkspaceBindingsResponse + :members: + :undoc-members: + .. autoclass:: ValidateCredentialRequest :members: :undoc-members: @@ -1584,12 +1648,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: WorkspaceBindingBindingType + Using `BINDING_TYPE_` prefix here to avoid conflict with `TableOperation` enum in `credentials_common.proto`. + .. py:attribute:: BINDING_TYPE_READ_ONLY :value: "BINDING_TYPE_READ_ONLY" .. py:attribute:: BINDING_TYPE_READ_WRITE :value: "BINDING_TYPE_READ_WRITE" - -.. autoclass:: WorkspaceBindingsResponse - :members: - :undoc-members: diff --git a/docs/dbdataclasses/cleanrooms.rst b/docs/dbdataclasses/cleanrooms.rst index 85ec98250..b07745b6f 100644 --- a/docs/dbdataclasses/cleanrooms.rst +++ b/docs/dbdataclasses/cleanrooms.rst @@ -84,6 +84,29 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CleanRoomNotebookReview + :members: + :undoc-members: + +.. py:class:: CleanRoomNotebookReviewNotebookReviewState + + .. py:attribute:: APPROVED + :value: "APPROVED" + + .. py:attribute:: PENDING + :value: "PENDING" + + .. py:attribute:: REJECTED + :value: "REJECTED" + +.. py:class:: CleanRoomNotebookReviewNotebookReviewSubReason + + .. py:attribute:: AUTO_APPROVED + :value: "AUTO_APPROVED" + + .. py:attribute:: BACKFILLED + :value: "BACKFILLED" + .. autoclass:: CleanRoomNotebookTaskRun :members: :undoc-members: diff --git a/docs/dbdataclasses/compute.rst b/docs/dbdataclasses/compute.rst index f7b55523b..fcee1a56c 100644 --- a/docs/dbdataclasses/compute.rst +++ b/docs/dbdataclasses/compute.rst @@ -968,10 +968,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: NodeTypeFlexibility - :members: - :undoc-members: - .. autoclass:: PendingInstanceError :members: :undoc-members: @@ -1485,6 +1481,24 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: NETVISOR_SETUP_TIMEOUT :value: "NETVISOR_SETUP_TIMEOUT" + .. py:attribute:: NETWORK_CHECK_CONTROL_PLANE_FAILURE + :value: "NETWORK_CHECK_CONTROL_PLANE_FAILURE" + + .. py:attribute:: NETWORK_CHECK_DNS_SERVER_FAILURE + :value: "NETWORK_CHECK_DNS_SERVER_FAILURE" + + .. 
py:attribute:: NETWORK_CHECK_METADATA_ENDPOINT_FAILURE + :value: "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE" + + .. py:attribute:: NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE + :value: "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE" + + .. py:attribute:: NETWORK_CHECK_NIC_FAILURE + :value: "NETWORK_CHECK_NIC_FAILURE" + + .. py:attribute:: NETWORK_CHECK_STORAGE_FAILURE + :value: "NETWORK_CHECK_STORAGE_FAILURE" + .. py:attribute:: NETWORK_CONFIGURATION_FAILURE :value: "NETWORK_CONFIGURATION_FAILURE" @@ -1521,6 +1535,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SECRET_CREATION_FAILURE :value: "SECRET_CREATION_FAILURE" + .. py:attribute:: SECRET_PERMISSION_DENIED + :value: "SECRET_PERMISSION_DENIED" + .. py:attribute:: SECRET_RESOLUTION_ERROR :value: "SECRET_RESOLUTION_ERROR" diff --git a/docs/dbdataclasses/dashboards.rst b/docs/dbdataclasses/dashboards.rst index 525049135..5ac3f0f4b 100644 --- a/docs/dbdataclasses/dashboards.rst +++ b/docs/dbdataclasses/dashboards.rst @@ -162,6 +162,15 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: DEPLOYMENT_NOT_FOUND_EXCEPTION :value: "DEPLOYMENT_NOT_FOUND_EXCEPTION" + .. py:attribute:: DESCRIBE_QUERY_INVALID_SQL_ERROR + :value: "DESCRIBE_QUERY_INVALID_SQL_ERROR" + + .. py:attribute:: DESCRIBE_QUERY_TIMEOUT + :value: "DESCRIBE_QUERY_TIMEOUT" + + .. py:attribute:: DESCRIBE_QUERY_UNEXPECTED_FAILURE + :value: "DESCRIBE_QUERY_UNEXPECTED_FAILURE" + .. py:attribute:: FUNCTIONS_NOT_AVAILABLE_EXCEPTION :value: "FUNCTIONS_NOT_AVAILABLE_EXCEPTION" @@ -198,6 +207,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION :value: "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION" + .. py:attribute:: INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION + :value: "INVALID_CHAT_COMPLETION_ARGUMENTS_JSON_EXCEPTION" + .. py:attribute:: INVALID_CHAT_COMPLETION_JSON_EXCEPTION :value: "INVALID_CHAT_COMPLETION_JSON_EXCEPTION" @@ -207,6 +219,15 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: INVALID_FUNCTION_CALL_EXCEPTION :value: "INVALID_FUNCTION_CALL_EXCEPTION" + .. py:attribute:: INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION + :value: "INVALID_SQL_MULTIPLE_DATASET_REFERENCES_EXCEPTION" + + .. py:attribute:: INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION + :value: "INVALID_SQL_MULTIPLE_STATEMENTS_EXCEPTION" + + .. py:attribute:: INVALID_SQL_UNKNOWN_TABLE_EXCEPTION + :value: "INVALID_SQL_UNKNOWN_TABLE_EXCEPTION" + .. py:attribute:: INVALID_TABLE_IDENTIFIER_EXCEPTION :value: "INVALID_TABLE_IDENTIFIER_EXCEPTION" diff --git a/docs/dbdataclasses/iam.rst b/docs/dbdataclasses/iam.rst index 6df58ae4e..a471503a7 100644 --- a/docs/dbdataclasses/iam.rst +++ b/docs/dbdataclasses/iam.rst @@ -193,6 +193,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CAN_BIND :value: "CAN_BIND" + .. py:attribute:: CAN_CREATE + :value: "CAN_CREATE" + .. py:attribute:: CAN_EDIT :value: "CAN_EDIT" @@ -214,6 +217,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: CAN_MONITOR :value: "CAN_MONITOR" + .. py:attribute:: CAN_MONITOR_ONLY + :value: "CAN_MONITOR_ONLY" + .. py:attribute:: CAN_QUERY :value: "CAN_QUERY" @@ -246,10 +252,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. 
autoclass:: PermissionsRequest - :members: - :undoc-members: - .. autoclass:: PrincipalOutput :members: :undoc-members: @@ -293,6 +295,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_SERVICE_PRINCIPAL :value: "URN_IETF_PARAMS_SCIM_SCHEMAS_CORE_2_0_SERVICE_PRINCIPAL" +.. autoclass:: SetObjectPermissions + :members: + :undoc-members: + +.. autoclass:: UpdateObjectPermissions + :members: + :undoc-members: + .. autoclass:: UpdateResponse :members: :undoc-members: diff --git a/docs/dbdataclasses/jobs.rst b/docs/dbdataclasses/jobs.rst index 2c4f4c09e..670e83685 100644 --- a/docs/dbdataclasses/jobs.rst +++ b/docs/dbdataclasses/jobs.rst @@ -964,7 +964,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: TerminationCodeCode - The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. 
* `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user. + The code indicates why the run was terminated. Additional codes might be introduced in future releases. * `SUCCESS`: The run was completed successfully. * `SUCCESS_WITH_FAILURES`: The run was completed successfully but some child runs failed. * `USER_CANCELED`: The run was successfully canceled during execution by a user. * `CANCELED`: The run was canceled during execution by the Databricks platform; for example, if the maximum run duration was exceeded. * `SKIPPED`: Run was never executed, for example, if the upstream task run failed, the dependency type condition was not met, or there were no material tasks to execute. * `INTERNAL_ERROR`: The run encountered an unexpected error. Refer to the state message for further details. * `DRIVER_ERROR`: The run encountered an error while communicating with the Spark Driver. * `CLUSTER_ERROR`: The run failed due to a cluster error. Refer to the state message for further details. * `REPOSITORY_CHECKOUT_FAILED`: Failed to complete the checkout due to an error when communicating with the third party service. * `INVALID_CLUSTER_REQUEST`: The run failed because it issued an invalid request to start the cluster. * `WORKSPACE_RUN_LIMIT_EXCEEDED`: The workspace has reached the quota for the maximum number of concurrent active runs. Consider scheduling the runs over a larger time frame. * `FEATURE_DISABLED`: The run failed because it tried to access a feature unavailable for the workspace. * `CLUSTER_REQUEST_LIMIT_EXCEEDED`: The number of cluster creation, start, and upsize requests have exceeded the allotted rate limit. Consider spreading the run execution over a larger time frame. * `STORAGE_ACCESS_ERROR`: The run failed due to an error when accessing the customer blob storage. Refer to the state message for further details. * `RUN_EXECUTION_ERROR`: The run was completed with task failures. For more details, refer to the state message or run output. * `UNAUTHORIZED_ERROR`: The run failed due to a permission issue while accessing a resource. Refer to the state message for further details. * `LIBRARY_INSTALLATION_ERROR`: The run failed while installing the user-requested library. Refer to the state message for further details. The causes might include, but are not limited to: The provided library is invalid, there are insufficient permissions to install the library, and so forth. * `MAX_CONCURRENT_RUNS_EXCEEDED`: The scheduled run exceeds the limit of maximum concurrent runs set for the job. * `MAX_SPARK_CONTEXTS_EXCEEDED`: The run is scheduled on a cluster that has already reached the maximum number of contexts it is configured to create. See: [Link]. * `RESOURCE_NOT_FOUND`: A resource necessary for run execution does not exist. Refer to the state message for further details. * `INVALID_RUN_CONFIGURATION`: The run failed due to an invalid configuration. Refer to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size limit. 
* `DISABLED`: The run was never executed because it was disabled explicitly by the user. [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now .. py:attribute:: BUDGET_POLICY_LIMIT_EXCEEDED @@ -1030,6 +1030,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: SUCCESS :value: "SUCCESS" + .. py:attribute:: SUCCESS_WITH_FAILURES + :value: "SUCCESS_WITH_FAILURES" + .. py:attribute:: UNAUTHORIZED_ERROR :value: "UNAUTHORIZED_ERROR" diff --git a/docs/dbdataclasses/ml.rst b/docs/dbdataclasses/ml.rst index d02256659..1778c5837 100644 --- a/docs/dbdataclasses/ml.rst +++ b/docs/dbdataclasses/ml.rst @@ -124,6 +124,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CreateLoggedModelRequest + :members: + :undoc-members: + +.. autoclass:: CreateLoggedModelResponse + :members: + :undoc-members: + .. autoclass:: CreateModelRequest :members: :undoc-members: @@ -184,6 +192,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteLoggedModelResponse + :members: + :undoc-members: + +.. autoclass:: DeleteLoggedModelTagResponse + :members: + :undoc-members: + .. autoclass:: DeleteModelResponse :members: :undoc-members: @@ -295,6 +311,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: FinalizeLoggedModelRequest + :members: + :undoc-members: + +.. autoclass:: FinalizeLoggedModelResponse + :members: + :undoc-members: + .. autoclass:: ForecastingExperiment :members: :undoc-members: @@ -344,6 +368,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: GetLoggedModelResponse + :members: + :undoc-members: + .. autoclass:: GetMetricHistoryResponse :members: :undoc-members: @@ -396,6 +424,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListLoggedModelArtifactsResponse + :members: + :undoc-members: + .. autoclass:: ListModelsResponse :members: :undoc-members: @@ -424,6 +456,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: LogLoggedModelParamsRequest + :members: + :undoc-members: + +.. autoclass:: LogLoggedModelParamsRequestResponse + :members: + :undoc-members: + .. autoclass:: LogMetric :members: :undoc-members: @@ -440,6 +480,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: LogOutputsRequest + :members: + :undoc-members: + +.. autoclass:: LogOutputsResponse + :members: + :undoc-members: + .. autoclass:: LogParam :members: :undoc-members: @@ -448,6 +496,39 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: LoggedModel + :members: + :undoc-members: + +.. autoclass:: LoggedModelData + :members: + :undoc-members: + +.. autoclass:: LoggedModelInfo + :members: + :undoc-members: + +.. autoclass:: LoggedModelParameter + :members: + :undoc-members: + +.. py:class:: LoggedModelStatus + + A LoggedModelStatus enum value represents the status of a logged model. + + .. py:attribute:: LOGGED_MODEL_PENDING + :value: "LOGGED_MODEL_PENDING" + + .. py:attribute:: LOGGED_MODEL_READY + :value: "LOGGED_MODEL_READY" + + .. 
py:attribute:: LOGGED_MODEL_UPLOAD_FAILED + :value: "LOGGED_MODEL_UPLOAD_FAILED" + +.. autoclass:: LoggedModelTag + :members: + :undoc-members: + .. autoclass:: Metric :members: :undoc-members: @@ -464,6 +545,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ModelOutput + :members: + :undoc-members: + .. autoclass:: ModelTag :members: :undoc-members: @@ -703,6 +788,22 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SearchLoggedModelsDataset + :members: + :undoc-members: + +.. autoclass:: SearchLoggedModelsOrderBy + :members: + :undoc-members: + +.. autoclass:: SearchLoggedModelsRequest + :members: + :undoc-members: + +.. autoclass:: SearchLoggedModelsResponse + :members: + :undoc-members: + .. autoclass:: SearchModelVersionsResponse :members: :undoc-members: @@ -727,6 +828,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: SetLoggedModelTagsRequest + :members: + :undoc-members: + +.. autoclass:: SetLoggedModelTagsResponse + :members: + :undoc-members: + .. autoclass:: SetModelTagRequest :members: :undoc-members: diff --git a/docs/dbdataclasses/pipelines.rst b/docs/dbdataclasses/pipelines.rst index 671f9d02e..fbad8a4f3 100644 --- a/docs/dbdataclasses/pipelines.rst +++ b/docs/dbdataclasses/pipelines.rst @@ -130,6 +130,44 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. py:class:: IngestionSourceType + + .. py:attribute:: DYNAMICS365 + :value: "DYNAMICS365" + + .. py:attribute:: GA4_RAW_DATA + :value: "GA4_RAW_DATA" + + .. py:attribute:: MANAGED_POSTGRESQL + :value: "MANAGED_POSTGRESQL" + + .. py:attribute:: MYSQL + :value: "MYSQL" + + .. py:attribute:: NETSUITE + :value: "NETSUITE" + + .. py:attribute:: ORACLE + :value: "ORACLE" + + .. py:attribute:: POSTGRESQL + :value: "POSTGRESQL" + + .. py:attribute:: SALESFORCE + :value: "SALESFORCE" + + .. py:attribute:: SERVICENOW + :value: "SERVICENOW" + + .. py:attribute:: SHAREPOINT + :value: "SHAREPOINT" + + .. py:attribute:: SQLSERVER + :value: "SQLSERVER" + + .. py:attribute:: WORKDAY_RAAS + :value: "WORKDAY_RAAS" + .. autoclass:: ListPipelineEventsResponse :members: :undoc-members: @@ -171,6 +209,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PathPattern + :members: + :undoc-members: + .. autoclass:: PipelineAccessControlRequest :members: :undoc-members: @@ -328,9 +370,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: StartUpdateCause + What triggered this update. + .. py:attribute:: API_CALL :value: "API_CALL" + .. py:attribute:: INFRASTRUCTURE_MAINTENANCE + :value: "INFRASTRUCTURE_MAINTENANCE" + .. py:attribute:: JOB_TASK :value: "JOB_TASK" @@ -383,6 +430,9 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: API_CALL :value: "API_CALL" + .. py:attribute:: INFRASTRUCTURE_MAINTENANCE + :value: "INFRASTRUCTURE_MAINTENANCE" + .. py:attribute:: JOB_TASK :value: "JOB_TASK" @@ -441,6 +491,8 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:class:: UpdateStateInfoState + The update state. + .. 
py:attribute:: CANCELED :value: "CANCELED" diff --git a/docs/dbdataclasses/serving.rst b/docs/dbdataclasses/serving.rst index 8284a86f0..01249dced 100644 --- a/docs/dbdataclasses/serving.rst +++ b/docs/dbdataclasses/serving.rst @@ -124,6 +124,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: CreatePtEndpointRequest + :members: + :undoc-members: + .. autoclass:: CreateServingEndpoint :members: :undoc-members: @@ -319,6 +323,14 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: PtEndpointCoreConfig + :members: + :undoc-members: + +.. autoclass:: PtServedModel + :members: + :undoc-members: + .. autoclass:: PutAiGatewayRequest :members: :undoc-members: @@ -524,6 +536,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateProvisionedThroughputEndpointConfigRequest + :members: + :undoc-members: + .. autoclass:: V1ResponseChoiceElement :members: :undoc-members: diff --git a/docs/dbdataclasses/settings.rst b/docs/dbdataclasses/settings.rst index 91b0f4669..0917028d7 100644 --- a/docs/dbdataclasses/settings.rst +++ b/docs/dbdataclasses/settings.rst @@ -8,6 +8,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: AccountNetworkPolicy + :members: + :undoc-members: + .. autoclass:: AibiDashboardEmbeddingAccessPolicy :members: :undoc-members: @@ -240,10 +244,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: DeleteLlmProxyPartnerPoweredWorkspaceResponse + :members: + :undoc-members: + .. autoclass:: DeleteNetworkConnectivityConfigurationResponse :members: :undoc-members: +.. autoclass:: DeleteNetworkPolicyRpcResponse + :members: + :undoc-members: + .. autoclass:: DeletePersonalComputeSettingResponse :members: :undoc-members: @@ -362,6 +374,66 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: GOOGLE_CLOUD_STORAGE :value: "GOOGLE_CLOUD_STORAGE" +.. autoclass:: EgressNetworkPolicyNetworkAccessPolicy + :members: + :undoc-members: + +.. autoclass:: EgressNetworkPolicyNetworkAccessPolicyInternetDestination + :members: + :undoc-members: + +.. py:class:: EgressNetworkPolicyNetworkAccessPolicyInternetDestinationInternetDestinationType + + .. py:attribute:: DNS_NAME + :value: "DNS_NAME" + +.. autoclass:: EgressNetworkPolicyNetworkAccessPolicyPolicyEnforcement + :members: + :undoc-members: + +.. py:class:: EgressNetworkPolicyNetworkAccessPolicyPolicyEnforcementDryRunModeProductFilter + + The values should match the list of workloads used in networkconfig.proto + + .. py:attribute:: DBSQL + :value: "DBSQL" + + .. py:attribute:: ML_SERVING + :value: "ML_SERVING" + +.. py:class:: EgressNetworkPolicyNetworkAccessPolicyPolicyEnforcementEnforcementMode + + .. py:attribute:: DRY_RUN + :value: "DRY_RUN" + + .. py:attribute:: ENFORCED + :value: "ENFORCED" + +.. py:class:: EgressNetworkPolicyNetworkAccessPolicyRestrictionMode + + At which level can Databricks and Databricks managed compute access Internet. FULL_ACCESS: Databricks can access Internet. No blocking rules will apply. RESTRICTED_ACCESS: Databricks can only access explicitly allowed internet and storage destinations, as well as UC connections and external locations. + + .. py:attribute:: FULL_ACCESS + :value: "FULL_ACCESS" + + .. 
py:attribute:: RESTRICTED_ACCESS + :value: "RESTRICTED_ACCESS" + +.. autoclass:: EgressNetworkPolicyNetworkAccessPolicyStorageDestination + :members: + :undoc-members: + +.. py:class:: EgressNetworkPolicyNetworkAccessPolicyStorageDestinationStorageDestinationType + + .. py:attribute:: AWS_S3 + :value: "AWS_S3" + + .. py:attribute:: AZURE_STORAGE + :value: "AZURE_STORAGE" + + .. py:attribute:: GOOGLE_CLOUD_STORAGE + :value: "GOOGLE_CLOUD_STORAGE" + .. py:class:: EgressResourceType The target resources that are supported by Network Connectivity Config. Note: some egress types can support general types that are not defined in EgressResourceType. E.g.: Azure private endpoint supports private link enabled Azure services. @@ -457,6 +529,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListNetworkPoliciesResponse + :members: + :undoc-members: + .. autoclass:: ListNotificationDestinationsResponse :members: :undoc-members: @@ -484,6 +560,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. py:attribute:: BLOCK :value: "BLOCK" +.. autoclass:: LlmProxyPartnerPoweredAccount + :members: + :undoc-members: + +.. autoclass:: LlmProxyPartnerPoweredEnforce + :members: + :undoc-members: + +.. autoclass:: LlmProxyPartnerPoweredWorkspace + :members: + :undoc-members: + .. autoclass:: MicrosoftTeamsConfig :members: :undoc-members: @@ -536,6 +624,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: NetworkPolicyEgress + :members: + :undoc-members: + .. autoclass:: NotificationDestination :members: :undoc-members: @@ -732,6 +824,18 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: UpdateLlmProxyPartnerPoweredAccountRequest + :members: + :undoc-members: + +.. autoclass:: UpdateLlmProxyPartnerPoweredEnforceRequest + :members: + :undoc-members: + +.. autoclass:: UpdateLlmProxyPartnerPoweredWorkspaceRequest + :members: + :undoc-members: + .. autoclass:: UpdateNotificationDestinationRequest :members: :undoc-members: @@ -751,3 +855,7 @@ These dataclasses are used in the SDK to represent API requests and responses fo .. autoclass:: UpdateRestrictWorkspaceAdminsSettingRequest :members: :undoc-members: + +.. autoclass:: WorkspaceNetworkOption + :members: + :undoc-members: diff --git a/docs/dbdataclasses/sharing.rst b/docs/dbdataclasses/sharing.rst index f72c59b21..2c79baa22 100644 --- a/docs/dbdataclasses/sharing.rst +++ b/docs/dbdataclasses/sharing.rst @@ -123,6 +123,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: FederationPolicy + :members: + :undoc-members: + .. autoclass:: FunctionParameterInfo :members: :undoc-members: @@ -166,6 +170,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: ListFederationPoliciesResponse + :members: + :undoc-members: + .. autoclass:: ListProviderShareAssetsResponse :members: :undoc-members: @@ -190,6 +198,10 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: +.. autoclass:: OidcFederationPolicy + :members: + :undoc-members: + .. 
autoclass:: Partition :members: :undoc-members: diff --git a/docs/dbdataclasses/sql.rst b/docs/dbdataclasses/sql.rst index ce015cf42..60712bc2e 100644 --- a/docs/dbdataclasses/sql.rst +++ b/docs/dbdataclasses/sql.rst @@ -294,10 +294,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: CreateAlertV2Request - :members: - :undoc-members: - .. autoclass:: CreateQueryRequest :members: :undoc-members: @@ -653,10 +649,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: ListAlertsV2ResponseAlert - :members: - :undoc-members: - .. py:class:: ListOrder .. py:attribute:: CREATED_AT @@ -1445,10 +1437,6 @@ These dataclasses are used in the SDK to represent API requests and responses fo :members: :undoc-members: -.. autoclass:: UpdateAlertV2Request - :members: - :undoc-members: - .. autoclass:: UpdateQueryRequest :members: :undoc-members: diff --git a/docs/workspace/catalog/database_instances.rst b/docs/workspace/catalog/database_instances.rst new file mode 100644 index 000000000..6c0dbf70e --- /dev/null +++ b/docs/workspace/catalog/database_instances.rst @@ -0,0 +1,136 @@ +``w.database_instances``: Database Instances +============================================ +.. currentmodule:: databricks.sdk.service.catalog + +.. py:class:: DatabaseInstancesAPI + + Database Instances provide access to a database via REST API or direct SQL. + + .. py:method:: create_database_catalog(catalog: DatabaseCatalog) -> DatabaseCatalog + + Create a Database Catalog. + + :param catalog: :class:`DatabaseCatalog` + + :returns: :class:`DatabaseCatalog` + + + .. py:method:: create_database_instance(database_instance: DatabaseInstance) -> DatabaseInstance + + Create a Database Instance. + + :param database_instance: :class:`DatabaseInstance` + A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. + + :returns: :class:`DatabaseInstance` + + + .. py:method:: create_synced_database_table(synced_table: SyncedDatabaseTable) -> SyncedDatabaseTable + + Create a Synced Database Table. + + :param synced_table: :class:`SyncedDatabaseTable` + Next field marker: 10 + + :returns: :class:`SyncedDatabaseTable` + + + .. py:method:: delete_database_catalog(name: str) + + Delete a Database Catalog. + + :param name: str + + + + + .. py:method:: delete_database_instance(name: str [, force: Optional[bool], purge: Optional[bool]]) + + Delete a Database Instance. + + :param name: str + Name of the instance to delete. + :param force: bool (optional) + By default, a instance cannot be deleted if it has descendant instances created via PITR. If this + flag is specified as true, all descendent instances will be deleted as well. + :param purge: bool (optional) + If false, the database instance is soft deleted. Soft deleted instances behave as if they are + deleted, and cannot be used for CRUD operations nor connected to. However they can be undeleted by + calling the undelete API for a limited time. If true, the database instance is hard deleted and + cannot be undeleted. + + + + + .. py:method:: delete_synced_database_table(name: str) + + Delete a Synced Database Table. + + :param name: str + + + + + .. py:method:: find_database_instance_by_uid( [, uid: Optional[str]]) -> DatabaseInstance + + Find a Database Instance by uid. + + :param uid: str (optional) + UID of the cluster to get. + + :returns: :class:`DatabaseInstance` + + + .. 
py:method:: get_database_catalog(name: str) -> DatabaseCatalog + + Get a Database Catalog. + + :param name: str + + :returns: :class:`DatabaseCatalog` + + + .. py:method:: get_database_instance(name: str) -> DatabaseInstance + + Get a Database Instance. + + :param name: str + Name of the cluster to get. + + :returns: :class:`DatabaseInstance` + + + .. py:method:: get_synced_database_table(name: str) -> SyncedDatabaseTable + + Get a Synced Database Table. + + :param name: str + + :returns: :class:`SyncedDatabaseTable` + + + .. py:method:: list_database_instances( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[DatabaseInstance] + + List Database Instances. + + :param page_size: int (optional) + Upper bound for items returned. + :param page_token: str (optional) + Pagination token to go to the next page of Database Instances. Requests first page if absent. + + :returns: Iterator over :class:`DatabaseInstance` + + + .. py:method:: update_database_instance(name: str, database_instance: DatabaseInstance, update_mask: str) -> DatabaseInstance + + Update a Database Instance. + + :param name: str + The name of the instance. This is the unique identifier for the instance. + :param database_instance: :class:`DatabaseInstance` + A DatabaseInstance represents a logical Postgres instance, comprised of both compute and storage. + :param update_mask: str + The list of fields to update. + + :returns: :class:`DatabaseInstance` + \ No newline at end of file diff --git a/docs/workspace/catalog/external_locations.rst b/docs/workspace/catalog/external_locations.rst index 980467306..c9f1e3e37 100644 --- a/docs/workspace/catalog/external_locations.rst +++ b/docs/workspace/catalog/external_locations.rst @@ -15,7 +15,7 @@ To create external locations, you must be a metastore admin or a user with the **CREATE_EXTERNAL_LOCATION** privilege. - .. py:method:: create(name: str, url: str, credential_name: str [, access_point: Optional[str], comment: Optional[str], encryption_details: Optional[EncryptionDetails], fallback: Optional[bool], read_only: Optional[bool], skip_validation: Optional[bool]]) -> ExternalLocationInfo + .. py:method:: create(name: str, url: str, credential_name: str [, comment: Optional[str], enable_file_events: Optional[bool], encryption_details: Optional[EncryptionDetails], fallback: Optional[bool], file_event_queue: Optional[FileEventQueue], read_only: Optional[bool], skip_validation: Optional[bool]]) -> ExternalLocationInfo Usage: @@ -59,16 +59,18 @@ Path URL of the external location. :param credential_name: str Name of the storage credential used with this location. - :param access_point: str (optional) - The AWS access point to use when accesing s3 for this external location. :param comment: str (optional) User-provided free-form text description. + :param enable_file_events: bool (optional) + [Create:OPT Update:OPT] Whether to enable file events on this external location. :param encryption_details: :class:`EncryptionDetails` (optional) Encryption options that apply to clients connecting to cloud storage. :param fallback: bool (optional) Indicates whether fallback mode is enabled for this external location. When fallback mode is enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. + :param file_event_queue: :class:`FileEventQueue` (optional) + [Create:OPT Update:OPT] File event queue settings. :param read_only: bool (optional) Indicates whether the external location is read-only. 
:param skip_validation: bool (optional) @@ -109,20 +111,20 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', + url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), ) - _ = w.external_locations.get(get=created.name) + _ = w.external_locations.get(name=created.name) # cleanup - w.storage_credentials.delete(delete=credential.name) - w.external_locations.delete(delete=created.name) + w.storage_credentials.delete(name=credential.name) + w.external_locations.delete(name=created.name) Get an external location. @@ -146,11 +148,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.external_locations.list(catalog.ListExternalLocationsRequest()) + all = w.external_locations.list() List external locations. @@ -172,7 +173,7 @@ :returns: Iterator over :class:`ExternalLocationInfo` - .. py:method:: update(name: str [, access_point: Optional[str], comment: Optional[str], credential_name: Optional[str], encryption_details: Optional[EncryptionDetails], fallback: Optional[bool], force: Optional[bool], isolation_mode: Optional[IsolationMode], new_name: Optional[str], owner: Optional[str], read_only: Optional[bool], skip_validation: Optional[bool], url: Optional[str]]) -> ExternalLocationInfo + .. py:method:: update(name: str [, comment: Optional[str], credential_name: Optional[str], enable_file_events: Optional[bool], encryption_details: Optional[EncryptionDetails], fallback: Optional[bool], file_event_queue: Optional[FileEventQueue], force: Optional[bool], isolation_mode: Optional[IsolationMode], new_name: Optional[str], owner: Optional[str], read_only: Optional[bool], skip_validation: Optional[bool], url: Optional[str]]) -> ExternalLocationInfo Usage: @@ -189,24 +190,24 @@ credential = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) created = w.external_locations.create( name=f"sdk-{time.time_ns()}", credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) _ = w.external_locations.update( name=created.name, credential_name=credential.name, - url="s3://%s/%s" % (os.environ["TEST_BUCKET"], f"sdk-{time.time_ns()}"), + url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}', ) # cleanup - w.storage_credentials.delete(name=credential.name) - w.external_locations.delete(name=created.name) + w.storage_credentials.delete(delete=credential.name) + w.external_locations.delete(delete=created.name) Update an external location. @@ -216,18 +217,20 @@ :param name: str Name of the external location. - :param access_point: str (optional) - The AWS access point to use when accesing s3 for this external location. :param comment: str (optional) User-provided free-form text description. :param credential_name: str (optional) Name of the storage credential used with this location. 
+ :param enable_file_events: bool (optional) + [Create:OPT Update:OPT] Whether to enable file events on this external location. :param encryption_details: :class:`EncryptionDetails` (optional) Encryption options that apply to clients connecting to cloud storage. :param fallback: bool (optional) Indicates whether fallback mode is enabled for this external location. When fallback mode is enabled, the access to the location falls back to cluster credentials if UC credentials are not sufficient. + :param file_event_queue: :class:`FileEventQueue` (optional) + [Create:OPT Update:OPT] File event queue settings. :param force: bool (optional) Force update even if changing url invalidates dependent external tables or mounts. :param isolation_mode: :class:`IsolationMode` (optional) diff --git a/docs/workspace/catalog/index.rst b/docs/workspace/catalog/index.rst index 471804098..7549bc487 100644 --- a/docs/workspace/catalog/index.rst +++ b/docs/workspace/catalog/index.rst @@ -11,6 +11,7 @@ Configure data governance with Unity Catalog for metastores, catalogs, schemas, catalogs connections credentials + database_instances external_locations functions grants diff --git a/docs/workspace/catalog/schemas.rst b/docs/workspace/catalog/schemas.rst index 7c4a84e53..d646a7489 100644 --- a/docs/workspace/catalog/schemas.rst +++ b/docs/workspace/catalog/schemas.rst @@ -179,7 +179,6 @@ :param comment: str (optional) User-provided free-form text description. :param enable_predictive_optimization: :class:`EnablePredictiveOptimization` (optional) - Whether predictive optimization should be enabled for this object and objects under it. :param new_name: str (optional) New name for the schema. :param owner: str (optional) diff --git a/docs/workspace/catalog/storage_credentials.rst b/docs/workspace/catalog/storage_credentials.rst index e199f7739..a1b985155 100644 --- a/docs/workspace/catalog/storage_credentials.rst +++ b/docs/workspace/catalog/storage_credentials.rst @@ -96,13 +96,13 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) - by_name = w.storage_credentials.get(name=created.name) + by_name = w.storage_credentials.get(get=created.name) # cleanup - w.storage_credentials.delete(name=created.name) + w.storage_credentials.delete(delete=created.name) Get a credential. @@ -123,11 +123,10 @@ .. code-block:: from databricks.sdk import WorkspaceClient - from databricks.sdk.service import catalog w = WorkspaceClient() - all = w.storage_credentials.list(catalog.ListStorageCredentialsRequest()) + all = w.storage_credentials.list() List credentials. @@ -165,17 +164,17 @@ created = w.storage_credentials.create( name=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) _ = w.storage_credentials.update( name=created.name, comment=f"sdk-{time.time_ns()}", - aws_iam_role=catalog.AwsIamRole(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), + aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]), ) # cleanup - w.storage_credentials.delete(delete=created.name) + w.storage_credentials.delete(name=created.name) Update a credential. 
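A minimal sketch of creating an external location with the new ``enable_file_events`` flag documented above. It reuses the storage-credential setup and the ``TEST_METASTORE_DATA_ACCESS_ARN`` / ``TEST_BUCKET`` environment variables assumed by the surrounding usage examples; ``file_event_queue`` is left unset because its exact shape depends on the cloud provider.

.. code-block::

    import os
    import time

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import catalog

    w = WorkspaceClient()

    # A storage credential to back the external location, as in the create() example above.
    credential = w.storage_credentials.create(
        name=f"sdk-{time.time_ns()}",
        aws_iam_role=catalog.AwsIamRoleRequest(role_arn=os.environ["TEST_METASTORE_DATA_ACCESS_ARN"]),
    )

    # enable_file_events is the optional flag introduced in this update.
    created = w.external_locations.create(
        name=f"sdk-{time.time_ns()}",
        credential_name=credential.name,
        url=f's3://{os.environ["TEST_BUCKET"]}/sdk-{time.time_ns()}',
        enable_file_events=True,
    )

    # cleanup
    w.external_locations.delete(name=created.name)
    w.storage_credentials.delete(name=credential.name)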
diff --git a/docs/workspace/catalog/system_schemas.rst b/docs/workspace/catalog/system_schemas.rst index 4ef2294cc..545a3b2e2 100644 --- a/docs/workspace/catalog/system_schemas.rst +++ b/docs/workspace/catalog/system_schemas.rst @@ -22,7 +22,7 @@ - .. py:method:: enable(metastore_id: str, schema_name: str) + .. py:method:: enable(metastore_id: str, schema_name: str [, catalog_name: Optional[str]]) Enable a system schema. @@ -33,6 +33,8 @@ The metastore ID under which the system schema lives. :param schema_name: str Full name of the system schema. + :param catalog_name: str (optional) + The catalog in which the system schema is to be enabled. diff --git a/docs/workspace/catalog/workspace_bindings.rst b/docs/workspace/catalog/workspace_bindings.rst index e469def12..c507d4c78 100644 --- a/docs/workspace/catalog/workspace_bindings.rst +++ b/docs/workspace/catalog/workspace_bindings.rst @@ -17,9 +17,9 @@ the new path (/api/2.1/unity-catalog/bindings/{securable_type}/{securable_name}) which introduces the ability to bind a securable in READ_ONLY mode (catalogs only). - Securable types that support binding: - catalog - storage_credential - external_location + Securable types that support binding: - catalog - storage_credential - credential - external_location - .. py:method:: get(name: str) -> CurrentWorkspaceBindings + .. py:method:: get(name: str) -> GetCatalogWorkspaceBindingsResponse Usage: @@ -47,18 +47,19 @@ :param name: str The name of the catalog. - :returns: :class:`CurrentWorkspaceBindings` + :returns: :class:`GetCatalogWorkspaceBindingsResponse` - .. py:method:: get_bindings(securable_type: GetBindingsSecurableType, securable_name: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[WorkspaceBinding] + .. py:method:: get_bindings(securable_type: str, securable_name: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[WorkspaceBinding] Get securable workspace bindings. Gets workspace bindings of the securable. The caller must be a metastore admin or an owner of the securable. - :param securable_type: :class:`GetBindingsSecurableType` - The type of the securable to bind to a workspace. + :param securable_type: str + The type of the securable to bind to a workspace (catalog, storage_credential, credential, or + external_location). :param securable_name: str The name of the securable. :param max_results: int (optional) @@ -72,7 +73,7 @@ :returns: Iterator over :class:`WorkspaceBinding` - .. py:method:: update(name: str [, assign_workspaces: Optional[List[int]], unassign_workspaces: Optional[List[int]]]) -> CurrentWorkspaceBindings + .. py:method:: update(name: str [, assign_workspaces: Optional[List[int]], unassign_workspaces: Optional[List[int]]]) -> UpdateCatalogWorkspaceBindingsResponse Usage: @@ -107,24 +108,25 @@ :param unassign_workspaces: List[int] (optional) A list of workspace IDs. - :returns: :class:`CurrentWorkspaceBindings` + :returns: :class:`UpdateCatalogWorkspaceBindingsResponse` - .. py:method:: update_bindings(securable_type: UpdateBindingsSecurableType, securable_name: str [, add: Optional[List[WorkspaceBinding]], remove: Optional[List[WorkspaceBinding]]]) -> WorkspaceBindingsResponse + .. py:method:: update_bindings(securable_type: str, securable_name: str [, add: Optional[List[WorkspaceBinding]], remove: Optional[List[WorkspaceBinding]]]) -> UpdateWorkspaceBindingsResponse Update securable workspace bindings. Updates workspace bindings of the securable. The caller must be a metastore admin or an owner of the securable. 
- :param securable_type: :class:`UpdateBindingsSecurableType` - The type of the securable to bind to a workspace. + :param securable_type: str + The type of the securable to bind to a workspace (catalog, storage_credential, credential, or + external_location). :param securable_name: str The name of the securable. :param add: List[:class:`WorkspaceBinding`] (optional) - List of workspace bindings + List of workspace bindings. :param remove: List[:class:`WorkspaceBinding`] (optional) - List of workspace bindings + List of workspace bindings. - :returns: :class:`WorkspaceBindingsResponse` + :returns: :class:`UpdateWorkspaceBindingsResponse` \ No newline at end of file diff --git a/docs/workspace/compute/clusters.rst b/docs/workspace/compute/clusters.rst index 6bc5faf56..fe9271c13 100644 --- a/docs/workspace/compute/clusters.rst +++ b/docs/workspace/compute/clusters.rst @@ -147,7 +147,8 @@ of executor logs is `$destination/$clusterId/executor`. :param cluster_name: str (optional) Cluster name requested by the user. This doesn't have to be unique. If not specified at creation, - the cluster name will be an empty string. + the cluster name will be an empty string. For job clusters, the cluster name is automatically set + based on the job and job run IDs. :param custom_tags: Dict[str,str] (optional) Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS instances and EBS volumes) with these tags in addition to `default_tags`. Notes: @@ -417,7 +418,8 @@ of executor logs is `$destination/$clusterId/executor`. :param cluster_name: str (optional) Cluster name requested by the user. This doesn't have to be unique. If not specified at creation, - the cluster name will be an empty string. + the cluster name will be an empty string. For job clusters, the cluster name is automatically set + based on the job and job run IDs. :param custom_tags: Dict[str,str] (optional) Additional tags for cluster resources. Databricks will tag all cluster resources (e.g., AWS instances and EBS volumes) with these tags in addition to `default_tags`. Notes: diff --git a/docs/workspace/compute/instance_pools.rst b/docs/workspace/compute/instance_pools.rst index 38ccb8b37..0614f2101 100644 --- a/docs/workspace/compute/instance_pools.rst +++ b/docs/workspace/compute/instance_pools.rst @@ -105,7 +105,7 @@ - .. py:method:: edit(instance_pool_id: str, instance_pool_name: str, node_type_id: str [, custom_tags: Optional[Dict[str, str]], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int], node_type_flexibility: Optional[NodeTypeFlexibility]]) + .. py:method:: edit(instance_pool_id: str, instance_pool_name: str, node_type_id: str [, custom_tags: Optional[Dict[str, str]], idle_instance_autotermination_minutes: Optional[int], max_capacity: Optional[int], min_idle_instances: Optional[int]]) Usage: @@ -162,9 +162,6 @@ upsize requests. :param min_idle_instances: int (optional) Minimum number of idle instances to keep in the instance pool - :param node_type_flexibility: :class:`NodeTypeFlexibility` (optional) - For Fleet-pool V2, this object contains the information about the alternate node type ids to use - when attempting to launch a cluster if the node type id is not available. 
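A minimal sketch of the string-typed binding lookup documented above; ``"main"`` is a placeholder catalog name, and the securable type is now passed as a plain string rather than an enum.

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # List workspace bindings for a catalog; valid securable types are catalog,
    # storage_credential, credential and external_location.
    for binding in w.workspace_bindings.get_bindings(
        securable_type="catalog",
        securable_name="main",
    ):
        print(binding)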
diff --git a/docs/workspace/compute/instance_profiles.rst b/docs/workspace/compute/instance_profiles.rst index 6b7c1ca05..182e1aa79 100644 --- a/docs/workspace/compute/instance_profiles.rst +++ b/docs/workspace/compute/instance_profiles.rst @@ -32,8 +32,10 @@ Register an instance profile. - In the UI, you can select the instance profile when launching clusters. This API is only available to - admin users. + Registers an instance profile in Databricks. In the UI, you can then give users the permission to use + this instance profile when launching clusters. + + This API is only available to admin users. :param instance_profile_arn: str The AWS ARN of the instance profile to register with Databricks. This field is required. diff --git a/docs/workspace/files/files.rst b/docs/workspace/files/files.rst index 661396ad1..ad7bca57e 100644 --- a/docs/workspace/files/files.rst +++ b/docs/workspace/files/files.rst @@ -149,7 +149,8 @@ The absolute path of the file. :param contents: BinaryIO :param overwrite: bool (optional) - If true, an existing file will be overwritten. + If true or unspecified, an existing file will be overwritten. If false, an error will be returned if + the path points to an existing file. \ No newline at end of file diff --git a/docs/workspace/iam/account_access_control_proxy.rst b/docs/workspace/iam/account_access_control_proxy.rst index 3338671bb..66c396be5 100644 --- a/docs/workspace/iam/account_access_control_proxy.rst +++ b/docs/workspace/iam/account_access_control_proxy.rst @@ -6,18 +6,23 @@ These APIs manage access rules on resources in an account. Currently, only grant rules are supported. A grant rule specifies a role assigned to a set of principals. A list of rules attached to a resource is - called a rule set. A workspace must belong to an account for these APIs to work. + called a rule set. A workspace must belong to an account for these APIs to work .. py:method:: get_assignable_roles_for_resource(resource: str) -> GetAssignableRolesForResourceResponse Get assignable roles for a resource. - Gets all the roles that can be granted on an account-level resource. A role is grantable if the rule + Gets all the roles that can be granted on an account level resource. A role is grantable if the rule set on the resource can contain an access rule of the role. :param resource: str The resource name for which assignable roles will be listed. + Examples | Summary :--- | :--- `resource=accounts/` | A resource name for the account. + `resource=accounts//groups/` | A resource name for the group. + `resource=accounts//servicePrincipals/` | A resource name for the service + principal. + :returns: :class:`GetAssignableRolesForResourceResponse` @@ -30,6 +35,12 @@ :param name: str The ruleset name associated with the request. + + Examples | Summary :--- | :--- `name=accounts//ruleSets/default` | A name for a rule set + on the account. `name=accounts//groups//ruleSets/default` | A name for a rule + set on the group. + `name=accounts//servicePrincipals//ruleSets/default` | + A name for a rule set on the service principal. :param etag: str Etag used for versioning. The response is at least as fresh as the eTag provided. Etag is used for optimistic concurrency control as a way to help prevent simultaneous updates of a rule set from @@ -38,6 +49,10 @@ etag from a GET rule set request, and pass it with the PUT update request to identify the rule set version you are updating. 
+ Examples | Summary :--- | :--- `etag=` | An empty etag can only be used in GET to indicate no + freshness requirements. `etag=RENUAAABhSweA4NvVmmUYdiU717H3Tgy0UJdor3gE4a+mq/oj9NjAf8ZsQ==` | An + etag encoded a specific version of the rule set to get or to be updated. + :returns: :class:`RuleSetResponse` @@ -45,8 +60,8 @@ Update a rule set. - Replace the rules of a rule set. First, use a GET rule set request to read the current version of the - rule set before modifying it. This pattern helps prevent conflicts between concurrent updates. + Replace the rules of a rule set. First, use get to read the current version of the rule set before + modifying it. This pattern helps prevent conflicts between concurrent updates. :param name: str Name of the rule set. diff --git a/docs/workspace/iam/current_user.rst b/docs/workspace/iam/current_user.rst index 1df3adf9f..bf739025c 100644 --- a/docs/workspace/iam/current_user.rst +++ b/docs/workspace/iam/current_user.rst @@ -17,7 +17,7 @@ w = WorkspaceClient() - me2 = w.current_user.me() + me = w.current_user.me() Get current user info. diff --git a/docs/workspace/iam/permissions.rst b/docs/workspace/iam/permissions.rst index 6cd5b269b..0c3ef26fc 100644 --- a/docs/workspace/iam/permissions.rst +++ b/docs/workspace/iam/permissions.rst @@ -5,51 +5,24 @@ .. py:class:: PermissionsAPI Permissions API are used to create read, write, edit, update and manage access for various users on - different objects and endpoints. - - * **[Apps permissions](:service:apps)** — Manage which users can manage or use apps. - - * **[Cluster permissions](:service:clusters)** — Manage which users can manage, restart, or attach to - clusters. - - * **[Cluster policy permissions](:service:clusterpolicies)** — Manage which users can use cluster - policies. - - * **[Delta Live Tables pipeline permissions](:service:pipelines)** — Manage which users can view, - manage, run, cancel, or own a Delta Live Tables pipeline. - - * **[Job permissions](:service:jobs)** — Manage which users can view, manage, trigger, cancel, or own a - job. - - * **[MLflow experiment permissions](:service:experiments)** — Manage which users can read, edit, or - manage MLflow experiments. - - * **[MLflow registered model permissions](:service:modelregistry)** — Manage which users can read, edit, - or manage MLflow registered models. - - * **[Password permissions](:service:users)** — Manage which users can use password login when SSO is - enabled. - - * **[Instance Pool permissions](:service:instancepools)** — Manage which users can manage or attach to - pools. - - * **[Repo permissions](repos)** — Manage which users can read, run, edit, or manage a repo. - - * **[Serving endpoint permissions](:service:servingendpoints)** — Manage which users can view, query, or - manage a serving endpoint. - - * **[SQL warehouse permissions](:service:warehouses)** — Manage which users can use or manage SQL - warehouses. - - * **[Token permissions](:service:tokenmanagement)** — Manage which users can create or use tokens. - - * **[Workspace object permissions](:service:workspace)** — Manage which users can read, run, edit, or - manage alerts, dbsql-dashboards, directories, files, notebooks and queries. - - For the mapping of the required permissions for specific actions or abilities and other important - information, see [Access Control]. - - Note that to manage access control on service principals, use **[Account Access Control + different objects and endpoints. 
* **[Apps permissions](:service:apps)** — Manage which users can manage + or use apps. * **[Cluster permissions](:service:clusters)** — Manage which users can manage, restart, or + attach to clusters. * **[Cluster policy permissions](:service:clusterpolicies)** — Manage which users + can use cluster policies. * **[Delta Live Tables pipeline permissions](:service:pipelines)** — Manage + which users can view, manage, run, cancel, or own a Delta Live Tables pipeline. * **[Job + permissions](:service:jobs)** — Manage which users can view, manage, trigger, cancel, or own a job. * + **[MLflow experiment permissions](:service:experiments)** — Manage which users can read, edit, or manage + MLflow experiments. * **[MLflow registered model permissions](:service:modelregistry)** — Manage which + users can read, edit, or manage MLflow registered models. * **[Instance Pool + permissions](:service:instancepools)** — Manage which users can manage or attach to pools. * **[Repo + permissions](repos)** — Manage which users can read, run, edit, or manage a repo. * **[Serving endpoint + permissions](:service:servingendpoints)** — Manage which users can view, query, or manage a serving + endpoint. * **[SQL warehouse permissions](:service:warehouses)** — Manage which users can use or manage + SQL warehouses. * **[Token permissions](:service:tokenmanagement)** — Manage which users can create or + use tokens. * **[Workspace object permissions](:service:workspace)** — Manage which users can read, run, + edit, or manage alerts, dbsql-dashboards, directories, files, notebooks and queries. For the mapping of + the required permissions for specific actions or abilities and other important information, see [Access + Control]. Note that to manage access control on service principals, use **[Account Access Control Proxy](:service:accountaccesscontrolproxy)**. [Access Control]: https://docs.databricks.com/security/auth-authz/access-control/index.html @@ -112,9 +85,10 @@ Gets the permission levels that a user can have on an object. :param request_object_type: str - + The type of the request object. Can be one of the following: alerts, authorization, clusters, + cluster-policies, dashboards, dbsql-dashboards, directories, experiments, files, instance-pools, + jobs, notebooks, pipelines, queries, registered-models, repos, serving-endpoints, or warehouses. :param request_object_id: str - :returns: :class:`GetPermissionLevelsResponse` diff --git a/docs/workspace/iam/service_principals.rst b/docs/workspace/iam/service_principals.rst index 74a498b00..40b65f6d5 100644 --- a/docs/workspace/iam/service_principals.rst +++ b/docs/workspace/iam/service_principals.rst @@ -20,19 +20,13 @@ import time from databricks.sdk import WorkspaceClient - from databricks.sdk.service import iam w = WorkspaceClient() - groups = w.groups.group_display_name_to_id_map(iam.ListGroupsRequest()) - - spn = w.service_principals.create( - display_name=f"sdk-{time.time_ns()}", - groups=[iam.ComplexValue(value=groups["admins"])], - ) + created = w.service_principals.create(display_name=f"sdk-{time.time_ns()}") # cleanup - w.service_principals.delete(id=spn.id) + w.service_principals.delete(id=created.id) Create a service principal. 
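A minimal sketch of querying assignable permission levels for an object, using ``clusters`` from the request object types listed above; the object ID below is a placeholder cluster ID.

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # "clusters" is one of the supported request object types; substitute a real cluster ID.
    levels = w.permissions.get_permission_levels(
        request_object_type="clusters",
        request_object_id="1234-567890-abcde123",
    )
    print(levels)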
diff --git a/docs/workspace/iam/users.rst b/docs/workspace/iam/users.rst index 0028cafe8..34de48f3b 100644 --- a/docs/workspace/iam/users.rst +++ b/docs/workspace/iam/users.rst @@ -27,10 +27,10 @@ w = WorkspaceClient() - other_owner = w.users.create(user_name=f"sdk-{time.time_ns()}@example.com") - - # cleanup - w.users.delete(id=other_owner.id) + user = w.users.create( + display_name=f"sdk-{time.time_ns()}", + user_name=f"sdk-{time.time_ns()}@example.com", + ) Create a new user. diff --git a/docs/workspace/ml/experiments.rst b/docs/workspace/ml/experiments.rst index da8156e65..f18915885 100644 --- a/docs/workspace/ml/experiments.rst +++ b/docs/workspace/ml/experiments.rst @@ -51,6 +51,26 @@ :returns: :class:`CreateExperimentResponse` + .. py:method:: create_logged_model(experiment_id: str [, model_type: Optional[str], name: Optional[str], params: Optional[List[LoggedModelParameter]], source_run_id: Optional[str], tags: Optional[List[LoggedModelTag]]]) -> CreateLoggedModelResponse + + Create a logged model. + + :param experiment_id: str + The ID of the experiment that owns the model. + :param model_type: str (optional) + The type of the model, such as ``"Agent"``, ``"Classifier"``, ``"LLM"``. + :param name: str (optional) + The name of the model (optional). If not specified one will be generated. + :param params: List[:class:`LoggedModelParameter`] (optional) + Parameters attached to the model. + :param source_run_id: str (optional) + The ID of the run that created the model. + :param tags: List[:class:`LoggedModelTag`] (optional) + Tags attached to the model. + + :returns: :class:`CreateLoggedModelResponse` + + .. py:method:: create_run( [, experiment_id: Optional[str], run_name: Optional[str], start_time: Optional[int], tags: Optional[List[RunTag]], user_id: Optional[str]]) -> CreateRunResponse @@ -110,6 +130,28 @@ + .. py:method:: delete_logged_model(model_id: str) + + Delete a logged model. + + :param model_id: str + The ID of the logged model to delete. + + + + + .. py:method:: delete_logged_model_tag(model_id: str, tag_key: str) + + Delete a tag on a logged model. + + :param model_id: str + The ID of the logged model to delete the tag from. + :param tag_key: str + The tag key. + + + + .. py:method:: delete_run(run_id: str) Delete a run. @@ -157,6 +199,19 @@ + .. py:method:: finalize_logged_model(model_id: str, status: LoggedModelStatus) -> FinalizeLoggedModelResponse + + Finalize a logged model. + + :param model_id: str + The ID of the logged model to finalize. + :param status: :class:`LoggedModelStatus` + Whether or not the model is ready for use. ``"LOGGED_MODEL_UPLOAD_FAILED"`` indicates that something + went wrong when logging the model weights / agent code). + + :returns: :class:`FinalizeLoggedModelResponse` + + .. py:method:: get_by_name(experiment_name: str) -> GetExperimentByNameResponse Get an experiment by name. @@ -247,6 +302,16 @@ :returns: Iterator over :class:`Metric` + .. py:method:: get_logged_model(model_id: str) -> GetLoggedModelResponse + + Get a logged model. + + :param model_id: str + The ID of the logged model to retrieve. + + :returns: :class:`GetLoggedModelResponse` + + .. py:method:: get_permission_levels(experiment_id: str) -> GetExperimentPermissionLevelsResponse Get experiment permission levels. @@ -346,6 +411,26 @@ :returns: Iterator over :class:`Experiment` + .. 
py:method:: list_logged_model_artifacts(model_id: str [, artifact_directory_path: Optional[str], page_token: Optional[str]]) -> ListLoggedModelArtifactsResponse + + List artifacts for a logged model. + + List artifacts for a logged model. Takes an optional ``artifact_directory_path`` prefix which if + specified, the response contains only artifacts with the specified prefix. + + :param model_id: str + The ID of the logged model for which to list the artifacts. + :param artifact_directory_path: str (optional) + Filter artifacts matching this path (a relative path from the root artifact directory). + :param page_token: str (optional) + Token indicating the page of artifact results to fetch. `page_token` is not supported when listing + artifacts in UC Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. Please call + `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC Volumes, which supports + pagination. See [List directory contents | Files API](/api/workspace/files/listdirectorycontents). + + :returns: :class:`ListLoggedModelArtifactsResponse` + + .. py:method:: log_batch( [, metrics: Optional[List[Metric]], params: Optional[List[Param]], run_id: Optional[str], tags: Optional[List[RunTag]]]) Log a batch of metrics/params/tags for a run. @@ -424,6 +509,22 @@ + .. py:method:: log_logged_model_params(model_id: str [, params: Optional[List[LoggedModelParameter]]]) + + Log params for a logged model. + + Logs params for a logged model. A param is a key-value pair (string key, string value). Examples + include hyperparameters used for ML model training. A param can be logged only once for a logged + model, and attempting to overwrite an existing param with a different value will result in an error + + :param model_id: str + The ID of the logged model to log params for. + :param params: List[:class:`LoggedModelParameter`] (optional) + Parameters to attach to the model. + + + + .. py:method:: log_metric(key: str, value: float, timestamp: int [, dataset_digest: Optional[str], dataset_name: Optional[str], model_id: Optional[str], run_id: Optional[str], run_uuid: Optional[str], step: Optional[int]]) Log a metric for a run. @@ -471,6 +572,22 @@ + .. py:method:: log_outputs(run_id: str [, models: Optional[List[ModelOutput]]]) + + Log outputs from a run. + + **NOTE**: Experimental: This API may change or be removed in a future release without warning. + + Logs outputs, such as models, from an MLflow Run. + + :param run_id: str + The ID of the Run from which to log outputs. + :param models: List[:class:`ModelOutput`] (optional) + The model outputs from the Run. + + + + .. py:method:: log_param(key: str, value: str [, run_id: Optional[str], run_uuid: Optional[str]]) Log a param for a run. @@ -564,6 +681,35 @@ :returns: Iterator over :class:`Experiment` + .. py:method:: search_logged_models( [, datasets: Optional[List[SearchLoggedModelsDataset]], experiment_ids: Optional[List[str]], filter: Optional[str], max_results: Optional[int], order_by: Optional[List[SearchLoggedModelsOrderBy]], page_token: Optional[str]]) -> SearchLoggedModelsResponse + + Search logged models. + + Search for Logged Models that satisfy specified search criteria. + + :param datasets: List[:class:`SearchLoggedModelsDataset`] (optional) + List of datasets on which to apply the metrics filter clauses. For example, a filter with + `metrics.accuracy > 0.9` and dataset info with name "test_dataset" means we will return all logged + models with accuracy > 0.9 on the test_dataset. 
Metric values from ANY dataset matching the criteria + are considered. If no datasets are specified, then metrics across all datasets are considered in the + filter. + :param experiment_ids: List[str] (optional) + The IDs of the experiments in which to search for logged models. + :param filter: str (optional) + A filter expression over logged model info and data that allows returning a subset of logged models. + The syntax is a subset of SQL that supports AND'ing together binary operations. + + Example: ``params.alpha < 0.3 AND metrics.accuracy > 0.9``. + :param max_results: int (optional) + The maximum number of Logged Models to return. The maximum limit is 50. + :param order_by: List[:class:`SearchLoggedModelsOrderBy`] (optional) + The list of columns for ordering the results, with additional fields for sorting criteria. + :param page_token: str (optional) + The token indicating the page of logged models to fetch. + + :returns: :class:`SearchLoggedModelsResponse` + + .. py:method:: search_runs( [, experiment_ids: Optional[List[str]], filter: Optional[str], max_results: Optional[int], order_by: Optional[List[str]], page_token: Optional[str], run_view_type: Optional[ViewType]]) -> Iterator[Run] Search for runs. @@ -617,6 +763,18 @@ + .. py:method:: set_logged_model_tags(model_id: str [, tags: Optional[List[LoggedModelTag]]]) + + Set a tag for a logged model. + + :param model_id: str + The ID of the logged model to set the tags on. + :param tags: List[:class:`LoggedModelTag`] (optional) + The tags to set on the logged model. + + + + .. py:method:: set_permissions(experiment_id: str [, access_control_list: Optional[List[ExperimentAccessControlRequest]]]) -> ExperimentPermissions Set experiment permissions. diff --git a/docs/workspace/ml/forecasting.rst b/docs/workspace/ml/forecasting.rst index fc43a3e84..79fca0ffe 100644 --- a/docs/workspace/ml/forecasting.rst +++ b/docs/workspace/ml/forecasting.rst @@ -6,14 +6,14 @@ The Forecasting API allows you to create and get serverless forecasting experiments - .. py:method:: create_experiment(train_data_path: str, target_column: str, time_column: str, forecast_granularity: str, forecast_horizon: int [, custom_weights_column: Optional[str], experiment_path: Optional[str], holiday_regions: Optional[List[str]], include_features: Optional[List[str]], max_runtime: Optional[int], prediction_data_path: Optional[str], primary_metric: Optional[str], register_to: Optional[str], split_column: Optional[str], timeseries_identifier_columns: Optional[List[str]], training_frameworks: Optional[List[str]]]) -> Wait[ForecastingExperiment] + .. py:method:: create_experiment(train_data_path: str, target_column: str, time_column: str, forecast_granularity: str, forecast_horizon: int [, custom_weights_column: Optional[str], experiment_path: Optional[str], future_feature_data_path: Optional[str], holiday_regions: Optional[List[str]], include_features: Optional[List[str]], max_runtime: Optional[int], prediction_data_path: Optional[str], primary_metric: Optional[str], register_to: Optional[str], split_column: Optional[str], timeseries_identifier_columns: Optional[List[str]], training_frameworks: Optional[List[str]]]) -> Wait[ForecastingExperiment] Create a forecasting experiment. Creates a serverless forecasting experiment. Returns the experiment ID. 
:param train_data_path: str - The fully qualified name of a Unity Catalog table, formatted as catalog_name.schema_name.table_name, + The fully qualified path of a Unity Catalog table, formatted as catalog_name.schema_name.table_name, used as training data for the forecasting model. :param target_column: str The column in the input training table used as the prediction target for model training. The values @@ -31,6 +31,9 @@ The column in the training table used to customize weights for each time series. :param experiment_path: str (optional) The path in the workspace to store the created experiment. + :param future_feature_data_path: str (optional) + The fully qualified path of a Unity Catalog table, formatted as catalog_name.schema_name.table_name, + used to store future feature data for predictions. :param holiday_regions: List[str] (optional) The region code(s) to automatically add holiday features. Currently supports only one region. :param include_features: List[str] (optional) @@ -63,7 +66,7 @@ See :method:wait_get_experiment_forecasting_succeeded for more details. - .. py:method:: create_experiment_and_wait(train_data_path: str, target_column: str, time_column: str, forecast_granularity: str, forecast_horizon: int [, custom_weights_column: Optional[str], experiment_path: Optional[str], holiday_regions: Optional[List[str]], include_features: Optional[List[str]], max_runtime: Optional[int], prediction_data_path: Optional[str], primary_metric: Optional[str], register_to: Optional[str], split_column: Optional[str], timeseries_identifier_columns: Optional[List[str]], training_frameworks: Optional[List[str]], timeout: datetime.timedelta = 2:00:00]) -> ForecastingExperiment + .. py:method:: create_experiment_and_wait(train_data_path: str, target_column: str, time_column: str, forecast_granularity: str, forecast_horizon: int [, custom_weights_column: Optional[str], experiment_path: Optional[str], future_feature_data_path: Optional[str], holiday_regions: Optional[List[str]], include_features: Optional[List[str]], max_runtime: Optional[int], prediction_data_path: Optional[str], primary_metric: Optional[str], register_to: Optional[str], split_column: Optional[str], timeseries_identifier_columns: Optional[List[str]], training_frameworks: Optional[List[str]], timeout: datetime.timedelta = 2:00:00]) -> ForecastingExperiment .. py:method:: get_experiment(experiment_id: str) -> ForecastingExperiment diff --git a/docs/workspace/ml/model_registry.rst b/docs/workspace/ml/model_registry.rst index 1f6dcf4d2..23c357275 100644 --- a/docs/workspace/ml/model_registry.rst +++ b/docs/workspace/ml/model_registry.rst @@ -95,6 +95,8 @@ w = WorkspaceClient() model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") + + mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Create a model. @@ -127,7 +129,7 @@ model = w.model_registry.create_model(name=f"sdk-{time.time_ns()}") - created = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") + mv = w.model_registry.create_model_version(name=model.registered_model.name, source="dbfs:/tmp") Create a model version. diff --git a/docs/workspace/pipelines/pipelines.rst b/docs/workspace/pipelines/pipelines.rst index b2cc3f346..5b52818da 100644 --- a/docs/workspace/pipelines/pipelines.rst +++ b/docs/workspace/pipelines/pipelines.rst @@ -15,7 +15,7 @@ also enforce data quality with Delta Live Tables expectations. 
Expectations allow you to define expected data quality and specify how to handle records that fail those expectations. - .. py:method:: create( [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) -> CreatePipelineResponse + .. py:method:: create( [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], dry_run: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) -> CreatePipelineResponse Usage: @@ -100,6 +100,10 @@ Whether Photon is enabled for this pipeline. :param restart_window: :class:`RestartWindow` (optional) Restart window of this pipeline. + :param root_path: str (optional) + Root path for this pipeline. This is used as the root directory when editing the pipeline in the + Databricks user interface and it is added to sys.path when executing Python sources during pipeline + execution. :param run_as: :class:`RunAs` (optional) Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline. @@ -259,6 +263,7 @@ Retrieves events for a pipeline. :param pipeline_id: str + The pipeline to return events for. :param filter: str (optional) Criteria to select a subset of results, expressed using a SQL-like syntax. The supported filters are: 1. level='INFO' (or WARN or ERROR) 2. level in ('INFO', 'WARN') 3. id='[event-id]' 4. timestamp @@ -362,6 +367,7 @@ :param pipeline_id: str :param cause: :class:`StartUpdateCause` (optional) + What triggered this update. :param full_refresh: bool (optional) If true, this update will reset all tables before running. :param full_refresh_selection: List[str] (optional) @@ -396,7 +402,7 @@ .. py:method:: stop_and_wait(pipeline_id: str, timeout: datetime.timedelta = 0:20:00) -> GetPipelineResponse - .. 
py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) + .. py:method:: update(pipeline_id: str [, allow_duplicate_names: Optional[bool], budget_policy_id: Optional[str], catalog: Optional[str], channel: Optional[str], clusters: Optional[List[PipelineCluster]], configuration: Optional[Dict[str, str]], continuous: Optional[bool], deployment: Optional[PipelineDeployment], development: Optional[bool], edition: Optional[str], event_log: Optional[EventLogSpec], expected_last_modified: Optional[int], filters: Optional[Filters], gateway_definition: Optional[IngestionGatewayPipelineDefinition], id: Optional[str], ingestion_definition: Optional[IngestionPipelineDefinition], libraries: Optional[List[PipelineLibrary]], name: Optional[str], notifications: Optional[List[Notifications]], photon: Optional[bool], restart_window: Optional[RestartWindow], root_path: Optional[str], run_as: Optional[RunAs], schema: Optional[str], serverless: Optional[bool], storage: Optional[str], target: Optional[str], trigger: Optional[PipelineTrigger]]) Usage: @@ -500,6 +506,10 @@ Whether Photon is enabled for this pipeline. :param restart_window: :class:`RestartWindow` (optional) Restart window of this pipeline. + :param root_path: str (optional) + Root path for this pipeline. This is used as the root directory when editing the pipeline in the + Databricks user interface and it is added to sys.path when executing Python sources during pipeline + execution. :param run_as: :class:`RunAs` (optional) Write-only setting, available only in Create/Update calls. Specifies the user or service principal that the pipeline runs as. If not specified, the pipeline runs as the user who created the pipeline. diff --git a/docs/workspace/serving/serving_endpoints.rst b/docs/workspace/serving/serving_endpoints.rst index fb0c707b1..1e2e32884 100644 --- a/docs/workspace/serving/serving_endpoints.rst +++ b/docs/workspace/serving/serving_endpoints.rst @@ -60,6 +60,30 @@ .. py:method:: create_and_wait(name: str [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], config: Optional[EndpointCoreConfigInput], rate_limits: Optional[List[RateLimit]], route_optimized: Optional[bool], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed + .. py:method:: create_provisioned_throughput_endpoint(name: str, config: PtEndpointCoreConfig [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], tags: Optional[List[EndpointTag]]]) -> Wait[ServingEndpointDetailed] + + Create a new PT serving endpoint. + + :param name: str + The name of the serving endpoint. 
This field is required and must be unique across a Databricks + workspace. An endpoint name can consist of alphanumeric characters, dashes, and underscores. + :param config: :class:`PtEndpointCoreConfig` + The core config of the serving endpoint. + :param ai_gateway: :class:`AiGatewayConfig` (optional) + The AI Gateway configuration for the serving endpoint. + :param budget_policy_id: str (optional) + The budget policy associated with the endpoint. + :param tags: List[:class:`EndpointTag`] (optional) + Tags to be attached to the serving endpoint and automatically propagated to billing logs. + + :returns: + Long-running operation waiter for :class:`ServingEndpointDetailed`. + See :method:wait_get_serving_endpoint_not_updating for more details. + + + .. py:method:: create_provisioned_throughput_endpoint_and_wait(name: str, config: PtEndpointCoreConfig [, ai_gateway: Optional[AiGatewayConfig], budget_policy_id: Optional[str], tags: Optional[List[EndpointTag]], timeout: datetime.timedelta = 0:20:00]) -> ServingEndpointDetailed + + .. py:method:: delete(name: str) Delete a serving endpoint. @@ -345,4 +369,24 @@ :returns: :class:`ServingEndpointPermissions` + .. py:method:: update_provisioned_throughput_endpoint_config(name: str, config: PtEndpointCoreConfig) -> Wait[ServingEndpointDetailed] + + Update config of a PT serving endpoint. + + Updates any combination of the pt endpoint's served entities, the compute configuration of those + served entities, and the endpoint's traffic config. Updates are instantaneous and endpoint should be + updated instantly + + :param name: str + The name of the pt endpoint to update. This field is required. + :param config: :class:`PtEndpointCoreConfig` + + :returns: + Long-running operation waiter for :class:`ServingEndpointDetailed`. + See :method:wait_get_serving_endpoint_not_updating for more details. + + + .. py:method:: update_provisioned_throughput_endpoint_config_and_wait(name: str, config: PtEndpointCoreConfig, timeout: datetime.timedelta = 0:20:00) -> ServingEndpointDetailed + + .. py:method:: wait_get_serving_endpoint_not_updating(name: str, timeout: datetime.timedelta = 0:20:00, callback: Optional[Callable[[ServingEndpointDetailed], None]]) -> ServingEndpointDetailed diff --git a/docs/workspace/settings/disable_legacy_dbfs.rst b/docs/workspace/settings/disable_legacy_dbfs.rst index e0d5b1610..8d56e058c 100644 --- a/docs/workspace/settings/disable_legacy_dbfs.rst +++ b/docs/workspace/settings/disable_legacy_dbfs.rst @@ -4,8 +4,14 @@ .. py:class:: DisableLegacyDbfsAPI - When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new - mounts). When the setting is off, all DBFS functionality is enabled + Disabling legacy DBFS has the following implications: + + 1. Access to DBFS root and DBFS mounts is disallowed (as well as the creation of new mounts). 2. Disables + Databricks Runtime versions prior to 13.3LTS. + + When the setting is off, all DBFS functionality is enabled and no restrictions are imposed on Databricks + Runtime versions. This setting can take up to 20 minutes to take effect and requires a manual restart of + all-purpose compute clusters and SQL warehouses. .. 
py:method:: delete( [, etag: Optional[str]]) -> DeleteDisableLegacyDbfsResponse diff --git a/docs/workspace/settings/index.rst b/docs/workspace/settings/index.rst index f1fc7bc77..c1b45519c 100644 --- a/docs/workspace/settings/index.rst +++ b/docs/workspace/settings/index.rst @@ -22,6 +22,7 @@ Manage security settings for Accounts and Workspaces enable_notebook_table_clipboard enable_results_downloading enhanced_security_monitoring + llm_proxy_partner_powered_workspace restrict_workspace_admins token_management tokens diff --git a/docs/workspace/settings/llm_proxy_partner_powered_workspace.rst b/docs/workspace/settings/llm_proxy_partner_powered_workspace.rst new file mode 100644 index 000000000..6f464addb --- /dev/null +++ b/docs/workspace/settings/llm_proxy_partner_powered_workspace.rst @@ -0,0 +1,62 @@ +``w.settings.llm_proxy_partner_powered_workspace``: Enable Partner Powered AI Features for Workspace +==================================================================================================== +.. currentmodule:: databricks.sdk.service.settings + +.. py:class:: LlmProxyPartnerPoweredWorkspaceAPI + + Determines if partner powered models are enabled or not for a specific workspace + + .. py:method:: delete( [, etag: Optional[str]]) -> DeleteLlmProxyPartnerPoweredWorkspaceResponse + + Delete the enable partner powered AI features workspace setting. + + Reverts the enable partner powered AI features workspace setting to its default value. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`DeleteLlmProxyPartnerPoweredWorkspaceResponse` + + + .. py:method:: get( [, etag: Optional[str]]) -> LlmProxyPartnerPoweredWorkspace + + Get the enable partner powered AI features workspace setting. + + Gets the enable partner powered AI features workspace setting. + + :param etag: str (optional) + etag used for versioning. The response is at least as fresh as the eTag provided. This is used for + optimistic concurrency control as a way to help prevent simultaneous writes of a setting overwriting + each other. It is strongly suggested that systems make use of the etag in the read -> delete pattern + to perform setting deletions in order to avoid race conditions. That is, get an etag from a GET + request, and pass it with the DELETE request to identify the rule set version you are deleting. + + :returns: :class:`LlmProxyPartnerPoweredWorkspace` + + + .. py:method:: update(allow_missing: bool, setting: LlmProxyPartnerPoweredWorkspace, field_mask: str) -> LlmProxyPartnerPoweredWorkspace + + Update the enable partner powered AI features workspace setting. + + Updates the enable partner powered AI features workspace setting. + + :param allow_missing: bool + This should always be set to true for Settings API. Added for AIP compliance. + :param setting: :class:`LlmProxyPartnerPoweredWorkspace` + :param field_mask: str + The field mask must be a single string, with multiple fields separated by commas (no spaces). 
The + field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., + `author.given_name`). Specification of elements in sequence or map fields is not allowed, as only + the entire collection field can be specified. Field names must exactly match the resource field + names. + + A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the + fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API + changes in the future. + + :returns: :class:`LlmProxyPartnerPoweredWorkspace` + \ No newline at end of file diff --git a/docs/workspace/settings/settings.rst b/docs/workspace/settings/settings.rst index bc9cb026f..6eddb2508 100644 --- a/docs/workspace/settings/settings.rst +++ b/docs/workspace/settings/settings.rst @@ -58,8 +58,14 @@ .. py:property:: disable_legacy_dbfs :type: DisableLegacyDbfsAPI - When this setting is on, access to DBFS root and DBFS mounts is disallowed (as well as creation of new - mounts). When the setting is off, all DBFS functionality is enabled + Disabling legacy DBFS has the following implications: + + 1. Access to DBFS root and DBFS mounts is disallowed (as well as the creation of new mounts). 2. Disables + Databricks Runtime versions prior to 13.3LTS. + + When the setting is off, all DBFS functionality is enabled and no restrictions are imposed on Databricks + Runtime versions. This setting can take up to 20 minutes to take effect and requires a manual restart of + all-purpose compute clusters and SQL warehouses. .. py:property:: enable_export_notebook :type: EnableExportNotebookAPI @@ -88,6 +94,11 @@ If the compliance security profile is disabled, you can enable or disable this setting and it is not permanent. + .. py:property:: llm_proxy_partner_powered_workspace + :type: LlmProxyPartnerPoweredWorkspaceAPI + + Determines if partner powered models are enabled or not for a specific workspace + .. py:property:: restrict_workspace_admins :type: RestrictWorkspaceAdminsAPI diff --git a/docs/workspace/sharing/index.rst b/docs/workspace/sharing/index.rst index 09452b490..8882ca674 100644 --- a/docs/workspace/sharing/index.rst +++ b/docs/workspace/sharing/index.rst @@ -9,5 +9,6 @@ Configure data sharing with Unity Catalog for providers, recipients, and shares providers recipient_activation + recipient_federation_policies recipients shares \ No newline at end of file diff --git a/docs/workspace/sharing/providers.rst b/docs/workspace/sharing/providers.rst index d78dd62a0..263545400 100644 --- a/docs/workspace/sharing/providers.rst +++ b/docs/workspace/sharing/providers.rst @@ -108,25 +108,12 @@ .. code-block:: - import time - from databricks.sdk import WorkspaceClient + from databricks.sdk.service import sharing w = WorkspaceClient() - public_share_recipient = """{ - "shareCredentialsVersion":1, - "bearerToken":"dapiabcdefghijklmonpqrstuvwxyz", - "endpoint":"https://sharing.delta.io/delta-sharing/" - } - """ - - created = w.providers.create(name=f"sdk-{time.time_ns()}", recipient_profile_str=public_share_recipient) - - shares = w.providers.list_shares(name=created.name) - - # cleanup - w.providers.delete(name=created.name) + all = w.providers.list(sharing.ListProvidersRequest()) List providers. 
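A minimal sketch of reading the partner powered AI features workspace setting documented above; per the etag notes, the etag returned by a GET can later be passed to ``delete()`` to revert the setting to its default.

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # Read the current workspace-level setting for partner powered AI features.
    setting = w.settings.llm_proxy_partner_powered_workspace.get()
    print(setting)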
diff --git a/docs/workspace/sharing/recipient_federation_policies.rst b/docs/workspace/sharing/recipient_federation_policies.rst new file mode 100644 index 000000000..bd63cb0a8 --- /dev/null +++ b/docs/workspace/sharing/recipient_federation_policies.rst @@ -0,0 +1,125 @@ +``w.recipient_federation_policies``: Recipient Federation Policies +================================================================== +.. currentmodule:: databricks.sdk.service.sharing + +.. py:class:: RecipientFederationPoliciesAPI + + The Recipient Federation Policies APIs are only applicable in the open sharing model where the recipient + object has the authentication type of `OIDC_RECIPIENT`, enabling data sharing from Databricks to + non-Databricks recipients. OIDC Token Federation enables secure, secret-less authentication for accessing + Delta Sharing servers. Users and applications authenticate using short-lived OIDC tokens issued by their + own Identity Provider (IdP), such as Azure Entra ID or Okta, without the need for managing static + credentials or client secrets. A federation policy defines how non-Databricks recipients authenticate + using OIDC tokens. It validates the OIDC claims in federated tokens and is set at the recipient level. The + caller must be the owner of the recipient to create or manage a federation policy. Federation policies + support the following scenarios: - User-to-Machine (U2M) flow: A user accesses Delta Shares using their + own identity, such as connecting through PowerBI Delta Sharing Client. - Machine-to-Machine (M2M) flow: An + application accesses Delta Shares using its own identity, typically for automation tasks like nightly jobs + through Python Delta Sharing Client. OIDC Token Federation enables fine-grained access control, supports + Multi-Factor Authentication (MFA), and enhances security by minimizing the risk of credential leakage + through the use of short-lived, expiring tokens. It is designed for strong identity governance, secure + cross-platform data sharing, and reduced operational overhead for credential management. + + For more information, see + https://www.databricks.com/blog/announcing-oidc-token-federation-enhanced-delta-sharing-security and + https://docs.databricks.com/en/delta-sharing/create-recipient-oidc-fed + + .. py:method:: create(recipient_name: str, policy: FederationPolicy) -> FederationPolicy + + Create recipient federation policy. + + Create a federation policy for an OIDC_FEDERATION recipient for sharing data from Databricks to + non-Databricks recipients. The caller must be the owner of the recipient. When sharing data from + Databricks to non-Databricks clients, you can define a federation policy to authenticate + non-Databricks recipients. The federation policy validates OIDC claims in federated tokens and is + defined at the recipient level. This enables secretless sharing clients to authenticate using OIDC + tokens. + + Supported scenarios for federation policies: 1. **User-to-Machine (U2M) flow** (e.g., PowerBI): A user + accesses a resource using their own identity. 2. **Machine-to-Machine (M2M) flow** (e.g., OAuth App): + An OAuth App accesses a resource using its own identity, typically for tasks like running nightly + jobs. 
+ + For an overview, refer to: - Blog post: Overview of feature: + https://www.databricks.com/blog/announcing-oidc-token-federation-enhanced-delta-sharing-security + + For detailed configuration guides based on your use case: - Creating a Federation Policy as a + provider: https://docs.databricks.com/en/delta-sharing/create-recipient-oidc-fed - Configuration and + usage for Machine-to-Machine (M2M) applications (e.g., Python Delta Sharing Client): + https://docs.databricks.com/aws/en/delta-sharing/sharing-over-oidc-m2m - Configuration and usage for + User-to-Machine (U2M) applications (e.g., PowerBI): + https://docs.databricks.com/aws/en/delta-sharing/sharing-over-oidc-u2m + + :param recipient_name: str + Name of the recipient. This is the name of the recipient for which the policy is being created. + :param policy: :class:`FederationPolicy` + + :returns: :class:`FederationPolicy` + + + .. py:method:: delete(recipient_name: str, name: str) + + Delete recipient federation policy. + + Deletes an existing federation policy for an OIDC_FEDERATION recipient. The caller must be the owner + of the recipient. + + :param recipient_name: str + Name of the recipient. This is the name of the recipient for which the policy is being deleted. + :param name: str + Name of the policy. This is the name of the policy to be deleted. + + + + + .. py:method:: get_federation_policy(recipient_name: str, name: str) -> FederationPolicy + + Get recipient federation policy. + + Reads an existing federation policy for an OIDC_FEDERATION recipient for sharing data from Databricks + to non-Databricks recipients. The caller must have read access to the recipient. + + :param recipient_name: str + Name of the recipient. This is the name of the recipient for which the policy is being retrieved. + :param name: str + Name of the policy. This is the name of the policy to be retrieved. + + :returns: :class:`FederationPolicy` + + + .. py:method:: list(recipient_name: str [, max_results: Optional[int], page_token: Optional[str]]) -> Iterator[FederationPolicy] + + List recipient federation policies. + + Lists federation policies for an OIDC_FEDERATION recipient for sharing data from Databricks to + non-Databricks recipients. The caller must have read access to the recipient. + + :param recipient_name: str + Name of the recipient. This is the name of the recipient for which the policies are being listed. + :param max_results: int (optional) + :param page_token: str (optional) + + :returns: Iterator over :class:`FederationPolicy` + + + .. py:method:: update(recipient_name: str, name: str, policy: FederationPolicy [, update_mask: Optional[str]]) -> FederationPolicy + + Update recipient federation policy. + + Updates an existing federation policy for an OIDC_RECIPIENT. The caller must be the owner of the + recipient. + + :param recipient_name: str + Name of the recipient. This is the name of the recipient for which the policy is being updated. + :param name: str + Name of the policy. This is the name of the current name of the policy. + :param policy: :class:`FederationPolicy` + :param update_mask: str (optional) + The field mask specifies which fields of the policy to update. To specify multiple fields in the + field mask, use comma as the separator (no space). The special value '*' indicates that all fields + should be updated (full replacement). If unspecified, all fields that are set in the policy provided + in the update request will overwrite the corresponding fields in the existing policy. 
Example value: + 'comment,oidc_policy.audiences'. + + :returns: :class:`FederationPolicy` + \ No newline at end of file diff --git a/docs/workspace/sharing/shares.rst b/docs/workspace/sharing/shares.rst index 1fafe755f..8d1dfea02 100644 --- a/docs/workspace/sharing/shares.rst +++ b/docs/workspace/sharing/shares.rst @@ -229,7 +229,7 @@ :returns: :class:`ShareInfo` - .. py:method:: update_permissions(name: str [, changes: Optional[List[PermissionsChange]]]) -> UpdateSharePermissionsResponse + .. py:method:: update_permissions(name: str [, changes: Optional[List[PermissionsChange]], omit_permissions_list: Optional[bool]]) -> UpdateSharePermissionsResponse Update permissions. @@ -243,6 +243,8 @@ The name of the share. :param changes: List[:class:`PermissionsChange`] (optional) Array of permission changes. + :param omit_permissions_list: bool (optional) + Optional. Whether to return the latest permissions list of the share in the response. :returns: :class:`UpdateSharePermissionsResponse` \ No newline at end of file diff --git a/docs/workspace/sql/alerts.rst b/docs/workspace/sql/alerts.rst index 11b4d7d65..be09efec5 100644 --- a/docs/workspace/sql/alerts.rst +++ b/docs/workspace/sql/alerts.rst @@ -152,7 +152,7 @@ :returns: Iterator over :class:`ListAlertsResponseAlert` - .. py:method:: update(id: str, update_mask: str [, alert: Optional[UpdateAlertRequestAlert]]) -> Alert + .. py:method:: update(id: str, update_mask: str [, alert: Optional[UpdateAlertRequestAlert], auto_resolve_display_name: Optional[bool]]) -> Alert Usage: @@ -215,6 +215,9 @@ fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. :param alert: :class:`UpdateAlertRequestAlert` (optional) + :param auto_resolve_display_name: bool (optional) + If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the + alert's display name conflicts with an existing alert's display name. :returns: :class:`Alert` \ No newline at end of file diff --git a/docs/workspace/sql/alerts_v2.rst b/docs/workspace/sql/alerts_v2.rst index 96b004a7d..2ff773949 100644 --- a/docs/workspace/sql/alerts_v2.rst +++ b/docs/workspace/sql/alerts_v2.rst @@ -6,13 +6,13 @@ TODO: Add description - .. py:method:: create_alert( [, alert: Optional[AlertV2]]) -> AlertV2 + .. py:method:: create_alert(alert: AlertV2) -> AlertV2 Create an alert. Create Alert - :param alert: :class:`AlertV2` (optional) + :param alert: :class:`AlertV2` :returns: :class:`AlertV2` @@ -28,7 +28,7 @@ :returns: :class:`AlertV2` - .. py:method:: list_alerts( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[ListAlertsV2ResponseAlert] + .. py:method:: list_alerts( [, page_size: Optional[int], page_token: Optional[str]]) -> Iterator[AlertV2] List alerts. @@ -37,7 +37,7 @@ :param page_size: int (optional) :param page_token: str (optional) - :returns: Iterator over :class:`ListAlertsV2ResponseAlert` + :returns: Iterator over :class:`AlertV2` .. py:method:: trash_alert(id: str) @@ -53,7 +53,7 @@ - .. py:method:: update_alert(id: str, update_mask: str [, alert: Optional[AlertV2]]) -> AlertV2 + .. py:method:: update_alert(id: str, alert: AlertV2, update_mask: str) -> AlertV2 Update an alert. @@ -61,6 +61,7 @@ :param id: str UUID identifying the alert. + :param alert: :class:`AlertV2` :param update_mask: str The field mask must be a single string, with multiple fields separated by commas (no spaces). 
The field path is relative to the resource object, using a dot (`.`) to navigate sub-fields (e.g., @@ -71,7 +72,6 @@ A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. - :param alert: :class:`AlertV2` (optional) :returns: :class:`AlertV2` \ No newline at end of file diff --git a/docs/workspace/sql/queries.rst b/docs/workspace/sql/queries.rst index 14c0ef0b6..f8553bead 100644 --- a/docs/workspace/sql/queries.rst +++ b/docs/workspace/sql/queries.rst @@ -126,7 +126,7 @@ :returns: Iterator over :class:`Visualization` - .. py:method:: update(id: str, update_mask: str [, query: Optional[UpdateQueryRequestQuery]]) -> Query + .. py:method:: update(id: str, update_mask: str [, auto_resolve_display_name: Optional[bool], query: Optional[UpdateQueryRequestQuery]]) -> Query Usage: @@ -179,6 +179,9 @@ A field mask of `*` indicates full replacement. It’s recommended to always explicitly list the fields being updated and avoid using `*` wildcards, as it can lead to unintended results if the API changes in the future. + :param auto_resolve_display_name: bool (optional) + If true, automatically resolve alert display name conflicts. Otherwise, fail the request if the + alert's display name conflicts with an existing alert's display name. :param query: :class:`UpdateQueryRequestQuery` (optional) :returns: :class:`Query` diff --git a/docs/workspace/workspace/workspace.rst b/docs/workspace/workspace/workspace.rst index e31c53fac..a33023065 100644 --- a/docs/workspace/workspace/workspace.rst +++ b/docs/workspace/workspace/workspace.rst @@ -185,18 +185,11 @@ notebook_path = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" w.workspace.import_( - path=notebook_path, - overwrite=True, + content=base64.b64encode(("CREATE LIVE TABLE dlt_sample AS SELECT 1").encode()).decode(), format=workspace.ImportFormat.SOURCE, - language=workspace.Language.PYTHON, - content=base64.b64encode( - ( - """import time - time.sleep(10) - dbutils.notebook.exit('hello') - """ - ).encode() - ).decode(), + language=workspace.Language.SQL, + overwrite=True, + path=notebook_path, ) Import a workspace object. @@ -242,14 +235,16 @@ .. code-block:: + import os + import time + from databricks.sdk import WorkspaceClient w = WorkspaceClient() - names = [] - for i in w.workspace.list(f"/Users/{w.current_user.me().user_name}", recursive=True): - names.append(i.path) - assert len(names) > 0 + notebook = f"/Users/{w.current_user.me().user_name}/sdk-{time.time_ns()}" + + objects = w.workspace.list(path=os.path.dirname(notebook)) List workspace objects From 442a2a9e4baf78a7eab5806b8d0fac6ca2cecf3f Mon Sep 17 00:00:00 2001 From: Ubuntu Date: Mon, 26 May 2025 17:12:12 +0000 Subject: [PATCH 2/2] Add changelogs --- NEXT_CHANGELOG.md | 3 +++ 1 file changed, 3 insertions(+) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 4d66a7489..7d66cab4f 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,6 +11,9 @@ ### Bug Fixes +- Fix a reported highlighting problem with the way API clients are imported in WorkspaceClient/AccountClient + ([#979](https://github.com/databricks/databricks-sdk-py/pull/979)). + ### Documentation ### Internal Changes
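A minimal usage sketch for the recipient federation policies API documented above; the recipient and policy names are placeholders, and the `comment` attribute is assumed from the field-mask example value (`'comment,oidc_policy.audiences'`):

.. code-block::

    from databricks.sdk import WorkspaceClient

    w = WorkspaceClient()

    # "analytics-partner" is a placeholder OIDC recipient name.
    for policy in w.recipient_federation_policies.list(recipient_name="analytics-partner"):
        print(policy.name, policy.comment)

    # Fetch a single policy by name ("powerbi-u2m" is a placeholder policy name).
    policy = w.recipient_federation_policies.get_federation_policy(
        recipient_name="analytics-partner",
        name="powerbi-u2m",
    )

    # Remove the policy once it is no longer needed.
    w.recipient_federation_policies.delete(
        recipient_name="analytics-partner",
        name="powerbi-u2m",
    )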
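The `update_mask` parameter on the alert, query, and federation-policy update methods above shares the same field-mask semantics (comma-separated field paths, dots for sub-fields, `*` for full replacement). A sketch of an explicit mask with `w.queries.update`, where the query ID and new field values are placeholders:

.. code-block::

    from databricks.sdk import WorkspaceClient
    from databricks.sdk.service import sql

    w = WorkspaceClient()

    # Only the fields named in update_mask are replaced; every other field
    # on the query is left untouched.
    updated = w.queries.update(
        id="12345678-1234-1234-1234-123456789012",
        update_mask="display_name,query_text",
        query=sql.UpdateQueryRequestQuery(
            display_name="nightly revenue rollup",
            query_text="SELECT 1",
        ),
    )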