From 74c302be43581f244088c93cd7c113a9895ef330 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 08:12:28 +0000 Subject: [PATCH 1/3] Bump API specification to 30 Apr 2025 --- .codegen/_openapi_sha | 2 +- NEXT_CHANGELOG.md | 19 + account_client.go | 9 +- experimental/mocks/mock_workspace_client.go | 9 + .../mock_network_connectivity_interface.go | 59 ++ .../service/sql/mock_alerts_v2_interface.go | 595 ++++++++++++++++++ service/apps/model.go | 6 +- service/billing/model.go | 2 +- service/catalog/model.go | 28 +- service/cleanrooms/model.go | 8 +- service/compute/model.go | 17 +- service/dashboards/model.go | 10 +- service/jobs/model.go | 19 +- service/oauth2/model.go | 8 +- service/pkg.go | 5 +- service/serving/model.go | 51 +- service/settings/api.go | 35 +- service/settings/impl.go | 18 +- service/settings/interface.go | 31 +- service/settings/model.go | 239 +++---- service/sql/api.go | 157 ++++- service/sql/impl.go | 92 +++ service/sql/interface.go | 33 + service/sql/model.go | 453 ++++++++++++- workspace_client.go | 4 + 25 files changed, 1714 insertions(+), 195 deletions(-) create mode 100644 experimental/mocks/service/sql/mock_alerts_v2_interface.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 8cd956362..e7f752fb5 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -06a18b97d7996d6cd8dd88bfdb0f2c2792739e46 \ No newline at end of file +ce962ccd0a078a5a9d89494fe38d237ce377d5f3 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index c5cdbb641..e9d122be8 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -12,3 +12,22 @@ ### Internal Changes ### API Changes +* Added [w.AlertsV2](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API) workspace-level service. +* Added `UpdateNccAzurePrivateEndpointRulePublic` method for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service. +* Added `CreatedAt`, `CreatedBy` and `MetastoreId` fields for [catalog.SetArtifactAllowlist](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#SetArtifactAllowlist). +* [Breaking] Added `NetworkConnectivityConfig` field for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). +* [Breaking] Added `PrivateEndpointRule` field for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). +* Added `DomainNames` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule). +* Added `AutoResolveDisplayName` field for [sql.CreateAlertRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateAlertRequest). +* Added `AutoResolveDisplayName` field for [sql.CreateQueryRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateQueryRequest). +* Added `CreateCleanRoom`, `ExecuteCleanRoomTask` and `ModifyCleanRoom` enum values for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). +* Added `DnsResolutionError` and `GcpDeniedByOrgPolicy` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). 
+* Added `Expired` enum value for [settings.NccAzurePrivateEndpointRuleConnectionState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleConnectionState). +* [Breaking] Changed `CreateNetworkConnectivityConfiguration` and `CreatePrivateEndpointRule` methods for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service with new required argument order. +* [Breaking] Changed `WorkloadSize` field for [serving.ServedModelInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInput) to type `string`. +* [Breaking] Changed `GroupId` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule) to type `string`. +* [Breaking] Changed `TargetServices` field for [settings.NccAzureServiceEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzureServiceEndpointRule) to type [settings.EgressResourceTypeList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EgressResourceTypeList). +* [Breaking] Removed `Name` and `Region` fields for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). +* [Breaking] Removed `GroupId` and `ResourceId` fields for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). +* [Breaking] Removed `Large`, `Medium` and `Small` enum values for [serving.ServedModelInputWorkloadSize](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInputWorkloadSize). +* [Breaking] Removed `Blob`, `Dfs`, `MysqlServer` and `SqlServer` enum values for [settings.NccAzurePrivateEndpointRuleGroupId](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleGroupId). diff --git a/account_client.go b/account_client.go index fb8aca2e6..d431b9dd1 100755 --- a/account_client.go +++ b/account_client.go @@ -224,7 +224,14 @@ type AccountClient struct { Metastores catalog.AccountMetastoresInterface // These APIs provide configurations for the network connectivity of your - // workspaces for serverless compute resources. + // workspaces for serverless compute resources. This API provides stable + // subnets for your workspace so that you can configure your firewalls on + // your Azure Storage accounts to allow access from Databricks. You can also + // use the API to provision private endpoints for Databricks to privately + // connect serverless compute resources to your Azure resources using Azure + // Private Link. See [configure serverless secure connectivity]. 
+ // + // [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security NetworkConnectivity settings.NetworkConnectivityInterface // These APIs manage network configurations for customer-managed VPCs diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index a14f2697e..fc96bd2a4 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -45,6 +45,7 @@ func NewMockWorkspaceClient(t interface { AccountAccessControlProxy: iam.NewMockAccountAccessControlProxyInterface(t), Alerts: sql.NewMockAlertsInterface(t), AlertsLegacy: sql.NewMockAlertsLegacyInterface(t), + AlertsV2: sql.NewMockAlertsV2Interface(t), Apps: apps.NewMockAppsInterface(t), ArtifactAllowlists: catalog.NewMockArtifactAllowlistsInterface(t), Catalogs: catalog.NewMockCatalogsInterface(t), @@ -312,6 +313,14 @@ func (m *MockWorkspaceClient) GetMockAlertsLegacyAPI() *sql.MockAlertsLegacyInte return api } +func (m *MockWorkspaceClient) GetMockAlertsV2API() *sql.MockAlertsV2Interface { + api, ok := m.WorkspaceClient.AlertsV2.(*sql.MockAlertsV2Interface) + if !ok { + panic(fmt.Sprintf("expected AlertsV2 to be *sql.MockAlertsV2Interface, actual was %T", m.WorkspaceClient.AlertsV2)) + } + return api +} + func (m *MockWorkspaceClient) GetMockAppsAPI() *apps.MockAppsInterface { api, ok := m.WorkspaceClient.Apps.(*apps.MockAppsInterface) if !ok { diff --git a/experimental/mocks/service/settings/mock_network_connectivity_interface.go b/experimental/mocks/service/settings/mock_network_connectivity_interface.go index d0d65089d..52ec9fabc 100644 --- a/experimental/mocks/service/settings/mock_network_connectivity_interface.go +++ b/experimental/mocks/service/settings/mock_network_connectivity_interface.go @@ -867,6 +867,65 @@ func (_c *MockNetworkConnectivityInterface_ListPrivateEndpointRulesByNetworkConn return _c } +// UpdateNccAzurePrivateEndpointRulePublic provides a mock function with given fields: ctx, request +func (_m *MockNetworkConnectivityInterface) UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request settings.UpdateNccAzurePrivateEndpointRulePublicRequest) (*settings.NccAzurePrivateEndpointRule, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateNccAzurePrivateEndpointRulePublic") + } + + var r0 *settings.NccAzurePrivateEndpointRule + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) (*settings.NccAzurePrivateEndpointRule, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) *settings.NccAzurePrivateEndpointRule); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.NccAzurePrivateEndpointRule) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateNccAzurePrivateEndpointRulePublic' +type MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call struct { + *mock.Call +} + +// UpdateNccAzurePrivateEndpointRulePublic is a helper method to define 
mock.On call +// - ctx context.Context +// - request settings.UpdateNccAzurePrivateEndpointRulePublicRequest +func (_e *MockNetworkConnectivityInterface_Expecter) UpdateNccAzurePrivateEndpointRulePublic(ctx interface{}, request interface{}) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + return &MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call{Call: _e.mock.On("UpdateNccAzurePrivateEndpointRulePublic", ctx, request)} +} + +func (_c *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call) Run(run func(ctx context.Context, request settings.UpdateNccAzurePrivateEndpointRulePublicRequest)) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateNccAzurePrivateEndpointRulePublicRequest)) + }) + return _c +} + +func (_c *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call) Return(_a0 *settings.NccAzurePrivateEndpointRule, _a1 error) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call) RunAndReturn(run func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) (*settings.NccAzurePrivateEndpointRule, error)) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + _c.Call.Return(run) + return _c +} + // NewMockNetworkConnectivityInterface creates a new instance of MockNetworkConnectivityInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. func NewMockNetworkConnectivityInterface(t interface { diff --git a/experimental/mocks/service/sql/mock_alerts_v2_interface.go b/experimental/mocks/service/sql/mock_alerts_v2_interface.go new file mode 100644 index 000000000..d48e31c99 --- /dev/null +++ b/experimental/mocks/service/sql/mock_alerts_v2_interface.go @@ -0,0 +1,595 @@ +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package sql + +import ( + context "context" + + listing "github.com/databricks/databricks-sdk-go/listing" + mock "github.com/stretchr/testify/mock" + + sql "github.com/databricks/databricks-sdk-go/service/sql" +) + +// MockAlertsV2Interface is an autogenerated mock type for the AlertsV2Interface type +type MockAlertsV2Interface struct { + mock.Mock +} + +type MockAlertsV2Interface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockAlertsV2Interface) EXPECT() *MockAlertsV2Interface_Expecter { + return &MockAlertsV2Interface_Expecter{mock: &_m.Mock} +} + +// CreateAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) CreateAlert(ctx context.Context, request sql.CreateAlertV2Request) (*sql.AlertV2, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for CreateAlert") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.CreateAlertV2Request) (*sql.AlertV2, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.CreateAlertV2Request) *sql.AlertV2); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.CreateAlertV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_CreateAlert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateAlert' +type MockAlertsV2Interface_CreateAlert_Call struct { + *mock.Call +} + +// CreateAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.CreateAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) CreateAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_CreateAlert_Call { + return &MockAlertsV2Interface_CreateAlert_Call{Call: _e.mock.On("CreateAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_CreateAlert_Call) Run(run func(ctx context.Context, request sql.CreateAlertV2Request)) *MockAlertsV2Interface_CreateAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.CreateAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_CreateAlert_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_CreateAlert_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_CreateAlert_Call) RunAndReturn(run func(context.Context, sql.CreateAlertV2Request) (*sql.AlertV2, error)) *MockAlertsV2Interface_CreateAlert_Call { + _c.Call.Return(run) + return _c +} + +// GetAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) GetAlert(ctx context.Context, request sql.GetAlertV2Request) (*sql.AlertV2, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetAlert") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.GetAlertV2Request) (*sql.AlertV2, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.GetAlertV2Request) *sql.AlertV2); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.GetAlertV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_GetAlert_Call is 
a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAlert' +type MockAlertsV2Interface_GetAlert_Call struct { + *mock.Call +} + +// GetAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.GetAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) GetAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_GetAlert_Call { + return &MockAlertsV2Interface_GetAlert_Call{Call: _e.mock.On("GetAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_GetAlert_Call) Run(run func(ctx context.Context, request sql.GetAlertV2Request)) *MockAlertsV2Interface_GetAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.GetAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlert_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_GetAlert_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlert_Call) RunAndReturn(run func(context.Context, sql.GetAlertV2Request) (*sql.AlertV2, error)) *MockAlertsV2Interface_GetAlert_Call { + _c.Call.Return(run) + return _c +} + +// GetAlertById provides a mock function with given fields: ctx, id +func (_m *MockAlertsV2Interface) GetAlertById(ctx context.Context, id string) (*sql.AlertV2, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetAlertById") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*sql.AlertV2, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *sql.AlertV2); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_GetAlertById_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetAlertById' +type MockAlertsV2Interface_GetAlertById_Call struct { + *mock.Call +} + +// GetAlertById is a helper method to define mock.On call +// - ctx context.Context +// - id string +func (_e *MockAlertsV2Interface_Expecter) GetAlertById(ctx interface{}, id interface{}) *MockAlertsV2Interface_GetAlertById_Call { + return &MockAlertsV2Interface_GetAlertById_Call{Call: _e.mock.On("GetAlertById", ctx, id)} +} + +func (_c *MockAlertsV2Interface_GetAlertById_Call) Run(run func(ctx context.Context, id string)) *MockAlertsV2Interface_GetAlertById_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlertById_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_GetAlertById_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlertById_Call) RunAndReturn(run func(context.Context, string) (*sql.AlertV2, error)) *MockAlertsV2Interface_GetAlertById_Call { + _c.Call.Return(run) + return _c +} + +// GetByDisplayName provides a mock function with given fields: ctx, name +func (_m *MockAlertsV2Interface) GetByDisplayName(ctx context.Context, name string) (*sql.ListAlertsV2ResponseAlert, error) { + ret := _m.Called(ctx, name) + + if len(ret) == 0 { + panic("no return value specified for GetByDisplayName") + } + + var r0 *sql.ListAlertsV2ResponseAlert + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) 
(*sql.ListAlertsV2ResponseAlert, error)); ok { + return rf(ctx, name) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *sql.ListAlertsV2ResponseAlert); ok { + r0 = rf(ctx, name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.ListAlertsV2ResponseAlert) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_GetByDisplayName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByDisplayName' +type MockAlertsV2Interface_GetByDisplayName_Call struct { + *mock.Call +} + +// GetByDisplayName is a helper method to define mock.On call +// - ctx context.Context +// - name string +func (_e *MockAlertsV2Interface_Expecter) GetByDisplayName(ctx interface{}, name interface{}) *MockAlertsV2Interface_GetByDisplayName_Call { + return &MockAlertsV2Interface_GetByDisplayName_Call{Call: _e.mock.On("GetByDisplayName", ctx, name)} +} + +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Run(run func(ctx context.Context, name string)) *MockAlertsV2Interface_GetByDisplayName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Return(_a0 *sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_GetByDisplayName_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) RunAndReturn(run func(context.Context, string) (*sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_GetByDisplayName_Call { + _c.Call.Return(run) + return _c +} + +// ListAlerts provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) ListAlerts(ctx context.Context, request sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert] { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAlerts") + } + + var r0 listing.Iterator[sql.ListAlertsV2ResponseAlert] + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(listing.Iterator[sql.ListAlertsV2ResponseAlert]) + } + } + + return r0 +} + +// MockAlertsV2Interface_ListAlerts_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAlerts' +type MockAlertsV2Interface_ListAlerts_Call struct { + *mock.Call +} + +// ListAlerts is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) ListAlerts(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlerts_Call { + return &MockAlertsV2Interface_ListAlerts_Call{Call: _e.mock.On("ListAlerts", ctx, request)} +} + +func (_c *MockAlertsV2Interface_ListAlerts_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlerts_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlerts_Call) Return(_a0 listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlerts_Call) RunAndReturn(run func(context.Context, 
sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { + _c.Call.Return(run) + return _c +} + +// ListAlertsAll provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) ListAlertsAll(ctx context.Context, request sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAlertsAll") + } + + var r0 []sql.ListAlertsV2ResponseAlert + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) []sql.ListAlertsV2ResponseAlert); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]sql.ListAlertsV2ResponseAlert) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_ListAlertsAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAlertsAll' +type MockAlertsV2Interface_ListAlertsAll_Call struct { + *mock.Call +} + +// ListAlertsAll is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) ListAlertsAll(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlertsAll_Call { + return &MockAlertsV2Interface_ListAlertsAll_Call{Call: _e.mock.On("ListAlertsAll", ctx, request)} +} + +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlertsAll_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Return(_a0 []sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_ListAlertsAll_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_ListAlertsAll_Call { + _c.Call.Return(run) + return _c +} + +// ListAlertsV2ResponseAlertDisplayNameToIdMap provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request sql.ListAlertsV2Request) (map[string]string, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAlertsV2ResponseAlertDisplayNameToIdMap") + } + + var r0 map[string]string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) map[string]string); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'ListAlertsV2ResponseAlertDisplayNameToIdMap' +type MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call struct { + *mock.Call +} + +// ListAlertsV2ResponseAlertDisplayNameToIdMap is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + return &MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call{Call: _e.mock.On("ListAlertsV2ResponseAlertDisplayNameToIdMap", ctx, request)} +} + +func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Return(_a0 map[string]string, _a1 error) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + _c.Call.Return(run) + return _c +} + +// TrashAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) TrashAlert(ctx context.Context, request sql.TrashAlertV2Request) error { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for TrashAlert") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, sql.TrashAlertV2Request) error); ok { + r0 = rf(ctx, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockAlertsV2Interface_TrashAlert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TrashAlert' +type MockAlertsV2Interface_TrashAlert_Call struct { + *mock.Call +} + +// TrashAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.TrashAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) TrashAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_TrashAlert_Call { + return &MockAlertsV2Interface_TrashAlert_Call{Call: _e.mock.On("TrashAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_TrashAlert_Call) Run(run func(ctx context.Context, request sql.TrashAlertV2Request)) *MockAlertsV2Interface_TrashAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.TrashAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlert_Call) Return(_a0 error) *MockAlertsV2Interface_TrashAlert_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlert_Call) RunAndReturn(run func(context.Context, sql.TrashAlertV2Request) error) *MockAlertsV2Interface_TrashAlert_Call { + _c.Call.Return(run) + return _c +} + +// TrashAlertById provides a mock function with given fields: ctx, id +func (_m *MockAlertsV2Interface) TrashAlertById(ctx context.Context, id string) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for TrashAlertById") + } + + var r0 error + if rf, ok := 
ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockAlertsV2Interface_TrashAlertById_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TrashAlertById' +type MockAlertsV2Interface_TrashAlertById_Call struct { + *mock.Call +} + +// TrashAlertById is a helper method to define mock.On call +// - ctx context.Context +// - id string +func (_e *MockAlertsV2Interface_Expecter) TrashAlertById(ctx interface{}, id interface{}) *MockAlertsV2Interface_TrashAlertById_Call { + return &MockAlertsV2Interface_TrashAlertById_Call{Call: _e.mock.On("TrashAlertById", ctx, id)} +} + +func (_c *MockAlertsV2Interface_TrashAlertById_Call) Run(run func(ctx context.Context, id string)) *MockAlertsV2Interface_TrashAlertById_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlertById_Call) Return(_a0 error) *MockAlertsV2Interface_TrashAlertById_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlertById_Call) RunAndReturn(run func(context.Context, string) error) *MockAlertsV2Interface_TrashAlertById_Call { + _c.Call.Return(run) + return _c +} + +// UpdateAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) UpdateAlert(ctx context.Context, request sql.UpdateAlertV2Request) (*sql.AlertV2, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateAlert") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.UpdateAlertV2Request) (*sql.AlertV2, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.UpdateAlertV2Request) *sql.AlertV2); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.UpdateAlertV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_UpdateAlert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateAlert' +type MockAlertsV2Interface_UpdateAlert_Call struct { + *mock.Call +} + +// UpdateAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.UpdateAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) UpdateAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_UpdateAlert_Call { + return &MockAlertsV2Interface_UpdateAlert_Call{Call: _e.mock.On("UpdateAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_UpdateAlert_Call) Run(run func(ctx context.Context, request sql.UpdateAlertV2Request)) *MockAlertsV2Interface_UpdateAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.UpdateAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_UpdateAlert_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_UpdateAlert_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_UpdateAlert_Call) RunAndReturn(run func(context.Context, sql.UpdateAlertV2Request) (*sql.AlertV2, error)) *MockAlertsV2Interface_UpdateAlert_Call { + _c.Call.Return(run) + return _c +} + +// NewMockAlertsV2Interface creates a new instance of MockAlertsV2Interface. 
It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockAlertsV2Interface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockAlertsV2Interface { + mock := &MockAlertsV2Interface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/service/apps/model.go b/service/apps/model.go index 59d2b1a59..e15a6693e 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -611,14 +611,14 @@ func (s ComputeStatus) MarshalJSON() ([]byte, error) { // Create an app deployment type CreateAppDeploymentRequest struct { - AppDeployment *AppDeployment `json:"app_deployment,omitempty"` + AppDeployment AppDeployment `json:"app_deployment"` // The name of the app. AppName string `json:"-" url:"-"` } // Create an app type CreateAppRequest struct { - App *App `json:"app,omitempty"` + App App `json:"app"` // If true, the app will not be started after creation. NoCompute bool `json:"-" url:"no_compute,omitempty"` @@ -755,7 +755,7 @@ type StopAppRequest struct { // Update an app type UpdateAppRequest struct { - App *App `json:"app,omitempty"` + App App `json:"app"` // The name of the app. The name must contain only lowercase alphanumeric // characters and hyphens. It must be unique within the workspace. Name string `json:"-" url:"-"` diff --git a/service/billing/model.go b/service/billing/model.go index 3de61625b..4f2106982 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -1115,7 +1115,7 @@ type UpdateBudgetPolicyRequest struct { // BudgetPolicy LimitConfig *LimitConfig `json:"-" url:"limit_config,omitempty"` // Contains the BudgetPolicy details. - Policy *BudgetPolicy `json:"policy,omitempty"` + Policy BudgetPolicy `json:"policy"` // The Id of the policy. This field is generated by Databricks and globally // unique. PolicyId string `json:"-" url:"-"` diff --git a/service/catalog/model.go b/service/catalog/model.go index 9708c90cd..d5bc4af35 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -1116,7 +1116,7 @@ func (s CreateMonitor) MarshalJSON() ([]byte, error) { // Create an Online Table type CreateOnlineTableRequest struct { // Online Table information. 
- Table *OnlineTable `json:"table,omitempty"` + Table OnlineTable `json:"table"` } type CreateRegisteredModelRequest struct { @@ -4440,6 +4440,8 @@ const PrivilegeCreate Privilege = `CREATE` const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG` +const PrivilegeCreateCleanRoom Privilege = `CREATE_CLEAN_ROOM` + const PrivilegeCreateConnection Privilege = `CREATE_CONNECTION` const PrivilegeCreateExternalLocation Privilege = `CREATE_EXTERNAL_LOCATION` @@ -4480,12 +4482,16 @@ const PrivilegeCreateVolume Privilege = `CREATE_VOLUME` const PrivilegeExecute Privilege = `EXECUTE` +const PrivilegeExecuteCleanRoomTask Privilege = `EXECUTE_CLEAN_ROOM_TASK` + const PrivilegeManage Privilege = `MANAGE` const PrivilegeManageAllowlist Privilege = `MANAGE_ALLOWLIST` const PrivilegeModify Privilege = `MODIFY` +const PrivilegeModifyCleanRoom Privilege = `MODIFY_CLEAN_ROOM` + const PrivilegeReadFiles Privilege = `READ_FILES` const PrivilegeReadPrivateFiles Privilege = `READ_PRIVATE_FILES` @@ -4528,11 +4534,11 @@ func (f *Privilege) String() string { // Set raw string value and validate it against allowed values func (f *Privilege) Set(v string) error { switch v { - case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `BROWSE`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `BROWSE`, `CREATE`, `CREATE_CATALOG`, `CREATE_CLEAN_ROOM`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `EXECUTE_CLEAN_ROOM_TASK`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `MODIFY_CLEAN_ROOM`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: *f = Privilege(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "BROWSE", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", 
"READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "BROWSE", "CREATE", "CREATE_CATALOG", "CREATE_CLEAN_ROOM", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "EXECUTE_CLEAN_ROOM_TASK", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "MODIFY_CLEAN_ROOM", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) } } @@ -4897,6 +4903,22 @@ type SetArtifactAllowlist struct { ArtifactMatchers []ArtifactMatcher `json:"artifact_matchers"` // The artifact type of the allowlist. ArtifactType ArtifactType `json:"-" url:"-"` + // Time at which this artifact allowlist was set, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of the user who set the artifact allowlist. + CreatedBy string `json:"created_by,omitempty"` + // Unique identifier of parent metastore. + MetastoreId string `json:"metastore_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *SetArtifactAllowlist) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SetArtifactAllowlist) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type SetRegisteredModelAliasRequest struct { diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index 7b7a441a0..468cbfa6c 100755 --- a/service/cleanrooms/model.go +++ b/service/cleanrooms/model.go @@ -534,7 +534,7 @@ func (s ComplianceSecurityProfile) MarshalJSON() ([]byte, error) { // Create an asset type CreateCleanRoomAssetRequest struct { // Metadata of the clean room asset - Asset *CleanRoomAsset `json:"asset,omitempty"` + Asset CleanRoomAsset `json:"asset"` // Name of the clean room. CleanRoomName string `json:"-" url:"-"` } @@ -544,7 +544,7 @@ type CreateCleanRoomOutputCatalogRequest struct { // Name of the clean room. CleanRoomName string `json:"-" url:"-"` - OutputCatalog *CleanRoomOutputCatalog `json:"output_catalog,omitempty"` + OutputCatalog CleanRoomOutputCatalog `json:"output_catalog"` } type CreateCleanRoomOutputCatalogResponse struct { @@ -553,7 +553,7 @@ type CreateCleanRoomOutputCatalogResponse struct { // Create a clean room type CreateCleanRoomRequest struct { - CleanRoom *CleanRoom `json:"clean_room,omitempty"` + CleanRoom CleanRoom `json:"clean_room"` } // Delete an asset @@ -716,7 +716,7 @@ func (s ListCleanRoomsResponse) MarshalJSON() ([]byte, error) { // Update an asset type UpdateCleanRoomAssetRequest struct { // Metadata of the clean room asset - Asset *CleanRoomAsset `json:"asset,omitempty"` + Asset CleanRoomAsset `json:"asset"` // The type of the asset. AssetType CleanRoomAssetAssetType `json:"-" url:"-"` // Name of the clean room. 
diff --git a/service/compute/model.go b/service/compute/model.go index 394488a1d..7125b7e2b 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2747,9 +2747,12 @@ func (s EnforceClusterComplianceResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The environment entity used to preserve serverless environment side panel and -// jobs' environment for non-notebook task. In this minimal environment spec, -// only pip dependencies are supported. +// The environment entity used to preserve serverless environment side panel, +// jobs' environment for non-notebook task, and DLT's environment for classic +// and serverless pipelines. (Note: DLT uses a copied version of the Environment +// proto below, at +// //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In +// this minimal environment spec, only pip dependencies are supported. type Environment struct { // Client version used by the environment The client is the user-facing // environment of the runtime. Each client comes with a specific set of @@ -5370,6 +5373,8 @@ const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_ const TerminationReasonCodeDisasterRecoveryReplication TerminationReasonCode = `DISASTER_RECOVERY_REPLICATION` +const TerminationReasonCodeDnsResolutionError TerminationReasonCode = `DNS_RESOLUTION_ERROR` + const TerminationReasonCodeDockerContainerCreationException TerminationReasonCode = `DOCKER_CONTAINER_CREATION_EXCEPTION` const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` @@ -5406,6 +5411,8 @@ const TerminationReasonCodeExecutorPodUnscheduled TerminationReasonCode = `EXECU const TerminationReasonCodeGcpApiRateQuotaExceeded TerminationReasonCode = `GCP_API_RATE_QUOTA_EXCEEDED` +const TerminationReasonCodeGcpDeniedByOrgPolicy TerminationReasonCode = `GCP_DENIED_BY_ORG_POLICY` + const TerminationReasonCodeGcpForbidden TerminationReasonCode = `GCP_FORBIDDEN` const TerminationReasonCodeGcpIamTimeout TerminationReasonCode = `GCP_IAM_TIMEOUT` @@ -5588,11 +5595,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, 
`BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, 
`UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, 
`INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", 
"CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", 
"ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", 
"NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } diff --git a/service/dashboards/model.go b/service/dashboards/model.go index a3f936531..bfef2675a 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -79,7 +79,7 @@ type CancelQueryExecutionResponseStatus struct { // Create dashboard type CreateDashboardRequest struct { - Dashboard *Dashboard `json:"dashboard,omitempty"` + Dashboard Dashboard `json:"dashboard"` } // Create dashboard schedule @@ -87,7 +87,7 @@ type CreateScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. DashboardId string `json:"-" url:"-"` - Schedule *Schedule `json:"schedule,omitempty"` + Schedule Schedule `json:"schedule"` } // Create schedule subscription @@ -97,7 +97,7 @@ type CreateSubscriptionRequest struct { // UUID identifying the schedule to which the subscription belongs. ScheduleId string `json:"-" url:"-"` - Subscription *Subscription `json:"subscription,omitempty"` + Subscription Subscription `json:"subscription"` } type CronSchedule struct { @@ -1340,7 +1340,7 @@ type UnpublishDashboardResponse struct { // Update dashboard type UpdateDashboardRequest struct { - Dashboard *Dashboard `json:"dashboard,omitempty"` + Dashboard Dashboard `json:"dashboard"` // UUID identifying the dashboard. DashboardId string `json:"-" url:"-"` } @@ -1350,7 +1350,7 @@ type UpdateScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. DashboardId string `json:"-" url:"-"` - Schedule *Schedule `json:"schedule,omitempty"` + Schedule Schedule `json:"schedule"` // UUID identifying the schedule. ScheduleId string `json:"-" url:"-"` } diff --git a/service/jobs/model.go b/service/jobs/model.go index e3e23e7a8..ec3933531 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -651,9 +651,8 @@ type CreateJob struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` - // The performance mode on a serverless job. The performance target - // determines the level of compute performance or cost-efficiency for the - // run. + // The performance mode on a serverless job. This field determines the level + // of compute performance or cost-efficiency for the run. 
// // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times @@ -1574,9 +1573,12 @@ func (s JobEmailNotifications) MarshalJSON() ([]byte, error) { type JobEnvironment struct { // The key of an environment. It has to be unique within a job. EnvironmentKey string `json:"environment_key"` - // The environment entity used to preserve serverless environment side panel - // and jobs' environment for non-notebook task. In this minimal environment - // spec, only pip dependencies are supported. + // The environment entity used to preserve serverless environment side + // panel, jobs' environment for non-notebook task, and DLT's environment for + // classic and serverless pipelines. (Note: DLT uses a copied version of the + // Environment proto below, at + // //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In + // this minimal environment spec, only pip dependencies are supported. Spec *compute.Environment `json:"spec,omitempty"` } @@ -1812,9 +1814,8 @@ type JobSettings struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` - // The performance mode on a serverless job. The performance target - // determines the level of compute performance or cost-efficiency for the - // run. + // The performance mode on a serverless job. This field determines the level + // of compute performance or cost-efficiency for the run. // // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 3d0fb78de..9782ef127 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -8,7 +8,7 @@ import ( // Create account federation policy type CreateAccountFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. The identifier must contain // only lowercase alphanumeric characters, numbers, hyphens, and slashes. If // unspecified, the id will be assigned by Databricks. @@ -109,7 +109,7 @@ func (s CreatePublishedAppIntegrationOutput) MarshalJSON() ([]byte, error) { // Create service principal federation policy type CreateServicePrincipalFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. The identifier must contain // only lowercase alphanumeric characters, numbers, hyphens, and slashes. If // unspecified, the id will be assigned by Databricks. @@ -656,7 +656,7 @@ func (s TokenAccessPolicy) MarshalJSON() ([]byte, error) { // Update account federation policy type UpdateAccountFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. PolicyId string `json:"-" url:"-"` // The field mask specifies which fields of the policy to update. To specify @@ -709,7 +709,7 @@ type UpdatePublishedAppIntegrationOutput struct { // Update service principal federation policy type UpdateServicePrincipalFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. 
PolicyId string `json:"-" url:"-"` // The service principal id for the federation policy. diff --git a/service/pkg.go b/service/pkg.go index aaefe6bdf..1d327c7dc 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -14,6 +14,8 @@ // // - [sql.AlertsLegacyAPI]: The alerts API can be used to perform CRUD operations on alerts. // +// - [sql.AlertsV2API]: The alerts v2 API can be used to perform CRUD operations on alerts. +// // - [apps.AppsAPI]: Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. // // - [catalog.ArtifactAllowlistsAPI]: In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the `allowlist` in UC so that users can leverage these artifacts on compute configured with shared access mode. @@ -326,6 +328,7 @@ var ( _ *settings.AibiDashboardEmbeddingApprovedDomainsAPI = nil _ *sql.AlertsAPI = nil _ *sql.AlertsLegacyAPI = nil + _ *sql.AlertsV2API = nil _ *apps.AppsAPI = nil _ *catalog.ArtifactAllowlistsAPI = nil _ *settings.AutomaticClusterUpdateAPI = nil @@ -345,8 +348,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *provisioning.CredentialsAPI = nil _ *catalog.CredentialsAPI = nil + _ *provisioning.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil diff --git a/service/serving/model.go b/service/serving/model.go index 502c8698c..f4f890c1c 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -1561,8 +1561,9 @@ type ServedEntityInput struct { // single unit of provisioned concurrency can process one request at a time.
// Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 - // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size is 0. - WorkloadSize ServedModelInputWorkloadSize `json:"workload_size,omitempty"` + // provisioned concurrency). Additional custom workload sizes can also be + // used when available in the workspace. If scale-to-zero is enabled, the + // lower bound of the provisioned concurrency for each workload size is 0. + WorkloadSize string `json:"workload_size,omitempty"` // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this // parameter is "CPU". For deep learning workloads, GPU acceleration is @@ -1740,35 +1743,6 @@ func (s ServedModelInput) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type ServedModelInputWorkloadSize string - -const ServedModelInputWorkloadSizeLarge ServedModelInputWorkloadSize = `Large` - -const ServedModelInputWorkloadSizeMedium ServedModelInputWorkloadSize = `Medium` - -const ServedModelInputWorkloadSizeSmall ServedModelInputWorkloadSize = `Small` - -// String representation for [fmt.Print] -func (f *ServedModelInputWorkloadSize) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *ServedModelInputWorkloadSize) Set(v string) error { - switch v { - case `Large`, `Medium`, `Small`: - *f = ServedModelInputWorkloadSize(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "Large", "Medium", "Small"`, v) - } -} - -// Type always returns ServedModelInputWorkloadSize to satisfy [pflag.Value] interface -func (f *ServedModelInputWorkloadSize) Type() string { - return "ServedModelInputWorkloadSize" -} - // Please keep this in sync with with workload types in // InferenceEndpointEntities.scala type ServedModelInputWorkloadType string @@ -1838,8 +1812,9 @@ type ServedModelOutput struct { // single unit of provisioned concurrency can process one request at a time. // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 - // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size is 0. + // provisioned concurrency). Additional custom workload sizes can also be + // used when available in the workspace. If scale-to-zero is enabled, the + // lower bound of the provisioned concurrency for each workload size is 0. WorkloadSize string `json:"workload_size,omitempty"` // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this diff --git a/service/settings/api.go b/service/settings/api.go index cb9a7f1ff..808e67ebd 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -1110,6 +1110,20 @@ func (a *IpAccessListsAPI) GetByLabel(ctx context.Context, name string) (*IpAcce type NetworkConnectivityInterface interface { // Create a network connectivity configuration. + // + // Creates a network connectivity configuration (NCC), which provides stable + // Azure service subnets when accessing your Azure Storage accounts. 
You can + // also use a network connectivity configuration to create Databricks managed + // private endpoints so that Databricks serverless compute resources privately + // access your resources. + // + // **IMPORTANT**: After you create the network connectivity configuration, you + // must assign one or more workspaces to the new network connectivity + // configuration. You can share one network connectivity configuration with + // multiple workspaces from the same Azure region within the same Databricks + // account. See [configure serverless secure connectivity]. + // + // [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) // Create a private endpoint rule. @@ -1166,12 +1180,12 @@ type NetworkConnectivityInterface interface { // Gets a network connectivity configuration. GetNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*NetworkConnectivityConfiguration, error) - // Get a private endpoint rule. + // Gets a private endpoint rule. // // Gets the private endpoint rule. GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) - // Get a private endpoint rule. + // Gets a private endpoint rule. // // Gets the private endpoint rule. GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) @@ -1208,6 +1222,12 @@ type NetworkConnectivityInterface interface { // // Gets an array of private endpoint rules. ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) + + // Update a private endpoint rule. + // + // Updates a private endpoint rule. Currently only a private endpoint rule to + // customer-managed resources is allowed to be updated. + UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request UpdateNccAzurePrivateEndpointRulePublicRequest) (*NccAzurePrivateEndpointRule, error) } func NewNetworkConnectivity(client *client.DatabricksClient) *NetworkConnectivityAPI { @@ -1219,7 +1239,14 @@ func NewNetworkConnectivity(client *client.DatabricksClient) *NetworkConnectivit } // These APIs provide configurations for the network connectivity of your -// workspaces for serverless compute resources. +// workspaces for serverless compute resources. This API provides stable subnets +// for your workspace so that you can configure your firewalls on your Azure +// Storage accounts to allow access from Databricks. You can also use the API to +// provision private endpoints for Databricks to privately connect serverless +// compute resources to your Azure resources using Azure Private Link. See +// [configure serverless secure connectivity]. +// +// [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security type NetworkConnectivityAPI struct { networkConnectivityImpl } @@ -1257,7 +1284,7 @@ func (a *NetworkConnectivityAPI) GetNetworkConnectivityConfigurationByNetworkCon }) } -// Get a private endpoint rule. +// Gets a private endpoint rule. // // Gets the private endpoint rule. 
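+//
+// A minimal usage sketch (illustrative only, not from the API spec; assumes a
+// configured *databricks.AccountClient `a`, with `ctx`, `nccId` and `ruleId`
+// as placeholders):
+//
+//	rule, err := a.NetworkConnectivity.GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx, nccId, ruleId)
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(rule.ConnectionState)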
func (a *NetworkConnectivityAPI) GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { diff --git a/service/settings/impl.go b/service/settings/impl.go index 7590bdf0d..d91e4d9be 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -695,7 +695,7 @@ func (a *networkConnectivityImpl) CreateNetworkConnectivityConfiguration(ctx con headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &networkConnectivityConfiguration) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.NetworkConnectivityConfig, &networkConnectivityConfiguration) return &networkConnectivityConfiguration, err } @@ -706,7 +706,7 @@ func (a *networkConnectivityImpl) CreatePrivateEndpointRule(ctx context.Context, headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.PrivateEndpointRule, &nccAzurePrivateEndpointRule) return &nccAzurePrivateEndpointRule, err } @@ -840,6 +840,20 @@ func (a *networkConnectivityImpl) internalListPrivateEndpointRules(ctx context.C return &listNccAzurePrivateEndpointRulesResponse, err } +func (a *networkConnectivityImpl) UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request UpdateNccAzurePrivateEndpointRulePublicRequest) (*NccAzurePrivateEndpointRule, error) { + var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule + path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) + queryParams := make(map[string]any) + if request.UpdateMask != "" { + queryParams["update_mask"] = request.UpdateMask + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.PrivateEndpointRule, &nccAzurePrivateEndpointRule) + return &nccAzurePrivateEndpointRule, err +} + // unexported type that holds implementations of just NotificationDestinations API methods type notificationDestinationsImpl struct { client *client.DatabricksClient diff --git a/service/settings/interface.go b/service/settings/interface.go index 8042e91ae..cfed01c9e 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -562,10 +562,31 @@ type IpAccessListsService interface { } // These APIs provide configurations for the network connectivity of your -// workspaces for serverless compute resources. +// workspaces for serverless compute resources. This API provides stable subnets +// for your workspace so that you can configure your firewalls on your Azure +// Storage accounts to allow access from Databricks. You can also use the API to +// provision private endpoints for Databricks to privately connect serverless +// compute resources to your Azure resources using Azure Private Link. See +// [configure serverless secure connectivity]. 
+// +// [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security type NetworkConnectivityService interface { // Create a network connectivity configuration. + // + // Creates a network connectivity configuration (NCC), which provides stable + // Azure service subnets when accessing your Azure Storage accounts. You can + // also use a network connectivity configuration to create Databricks + // managed private endpoints so that Databricks serverless compute resources + // privately access your resources. + // + // **IMPORTANT**: After you create the network connectivity configuration, + // you must assign one or more workspaces to the new network connectivity + // configuration. You can share one network connectivity configuration with + // multiple workspaces from the same Azure region within the same Databricks + // account. See [configure serverless secure connectivity]. + // + // [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) // Create a private endpoint rule. @@ -602,7 +623,7 @@ type NetworkConnectivityService interface { // Gets a network connectivity configuration. GetNetworkConnectivityConfiguration(ctx context.Context, request GetNetworkConnectivityConfigurationRequest) (*NetworkConnectivityConfiguration, error) - // Get a private endpoint rule. + // Gets a private endpoint rule. // // Gets the private endpoint rule. GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) @@ -620,6 +641,12 @@ type NetworkConnectivityService interface { // // Use ListPrivateEndpointRulesAll() to get all NccAzurePrivateEndpointRule instances, which will iterate over every result page. ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) + + // Update a private endpoint rule. + // + // Updates a private endpoint rule. Currently only a private endpoint rule + // to customer-managed resources is allowed to be updated. + UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request UpdateNccAzurePrivateEndpointRulePublicRequest) (*NccAzurePrivateEndpointRule, error) } // The notification destinations API lets you programmatically manage a diff --git a/service/settings/model.go b/service/settings/model.go index 8775aaa34..a3f5747eb 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -455,11 +455,18 @@ type CreateIpAccessListResponse struct { IpAccessList *IpAccessListInfo `json:"ip_access_list,omitempty"` } +// Create a network connectivity configuration type CreateNetworkConnectivityConfigRequest struct { + // Properties of the new network connectivity configuration. + NetworkConnectivityConfig CreateNetworkConnectivityConfiguration `json:"network_connectivity_config"` +} + +// Properties of the new network connectivity configuration. +type CreateNetworkConnectivityConfiguration struct { // The name of the network connectivity configuration. The name can contain // alphanumeric characters, hyphens, and underscores. The length must be // between 3 and 30 characters. The name must match the regular expression - // `^[0-9a-zA-Z-_]{3,30}$`. 
+ // ^[0-9a-zA-Z-_]{3,30}$ Name string `json:"name"` // The region for the network connectivity configuration. Only workspaces in // the same region can be attached to the network connectivity @@ -522,49 +529,42 @@ func (s CreateOboTokenResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type CreatePrivateEndpointRuleRequest struct { +// Properties of the new private endpoint rule. Note that you must approve the +// endpoint in Azure portal after initialization. +type CreatePrivateEndpointRule struct { + // Only used by private endpoints to customer-managed resources. + // + // Domain names of target private link service. When updating this field, + // the full list of target domain_names must be specified. + DomainNames []string `json:"domain_names,omitempty"` + // Only used by private endpoints to Azure first-party services. Enum: blob + // | dfs | sqlServer | mysqlServer + // // The sub-resource type (group ID) of the target resource. Note that to // connect to workspace root storage (root DBFS), you need two endpoints, - // one for `blob` and one for `dfs`. - GroupId CreatePrivateEndpointRuleRequestGroupId `json:"group_id"` - // Your Network Connectvity Configuration ID. - NetworkConnectivityConfigId string `json:"-" url:"-"` + // one for blob and one for dfs. + GroupId string `json:"group_id,omitempty"` // The Azure resource ID of the target resource. ResourceId string `json:"resource_id"` -} - -// The sub-resource type (group ID) of the target resource. Note that to connect -// to workspace root storage (root DBFS), you need two endpoints, one for `blob` -// and one for `dfs`. -type CreatePrivateEndpointRuleRequestGroupId string - -const CreatePrivateEndpointRuleRequestGroupIdBlob CreatePrivateEndpointRuleRequestGroupId = `blob` -const CreatePrivateEndpointRuleRequestGroupIdDfs CreatePrivateEndpointRuleRequestGroupId = `dfs` - -const CreatePrivateEndpointRuleRequestGroupIdMysqlServer CreatePrivateEndpointRuleRequestGroupId = `mysqlServer` - -const CreatePrivateEndpointRuleRequestGroupIdSqlServer CreatePrivateEndpointRuleRequestGroupId = `sqlServer` + ForceSendFields []string `json:"-" url:"-"` +} -// String representation for [fmt.Print] -func (f *CreatePrivateEndpointRuleRequestGroupId) String() string { - return string(*f) +func (s *CreatePrivateEndpointRule) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) } -// Set raw string value and validate it against allowed values -func (f *CreatePrivateEndpointRuleRequestGroupId) Set(v string) error { - switch v { - case `blob`, `dfs`, `mysqlServer`, `sqlServer`: - *f = CreatePrivateEndpointRuleRequestGroupId(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v) - } +func (s CreatePrivateEndpointRule) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } -// Type always returns CreatePrivateEndpointRuleRequestGroupId to satisfy [pflag.Value] interface -func (f *CreatePrivateEndpointRuleRequestGroupId) Type() string { - return "CreatePrivateEndpointRuleRequestGroupId" +// Create a private endpoint rule +type CreatePrivateEndpointRuleRequest struct { + // Your Network Connectivity Configuration ID. + NetworkConnectivityConfigId string `json:"-" url:"-"` + // Properties of the new private endpoint rule. Note that you must approve + // the endpoint in Azure portal after initialization. 
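+	//
+	// A sketch of the new nested request shape (illustrative only; the
+	// config ID and resource path are placeholders):
+	//
+	//	settings.CreatePrivateEndpointRuleRequest{
+	//		NetworkConnectivityConfigId: nccId,
+	//		PrivateEndpointRule: settings.CreatePrivateEndpointRule{
+	//			ResourceId: "/subscriptions/<sub>/resourceGroups/<rg>/providers/Microsoft.Storage/storageAccounts/<name>",
+	//			GroupId:    "blob",
+	//		},
+	//	}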
+ PrivateEndpointRule CreatePrivateEndpointRule `json:"private_endpoint_rule"` } type CreateTokenRequest struct { @@ -941,7 +941,7 @@ type DeleteIpAccessListRequest struct { // Delete a network connectivity configuration type DeleteNetworkConnectivityConfigurationRequest struct { - // Your Network Connectvity Configuration ID. + // Your Network Connectivity Configuration ID. NetworkConnectivityConfigId string `json:"-" url:"-"` } @@ -1416,6 +1416,35 @@ func (f *EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestina return "EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType" } +// The target resources that are supported by Network Connectivity Config. Note: +// some egress types can support general types that are not defined in +// EgressResourceType. E.g.: Azure private endpoint supports private link +// enabled Azure services. +type EgressResourceType string + +const EgressResourceTypeAzureBlobStorage EgressResourceType = `AZURE_BLOB_STORAGE` + +// String representation for [fmt.Print] +func (f *EgressResourceType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EgressResourceType) Set(v string) error { + switch v { + case `AZURE_BLOB_STORAGE`: + *f = EgressResourceType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AZURE_BLOB_STORAGE"`, v) + } +} + +// Type always returns EgressResourceType to satisfy [pflag.Value] interface +func (f *EgressResourceType) Type() string { + return "EgressResourceType" +} + type EmailConfig struct { // Email addresses to notify. Addresses []string `json:"addresses,omitempty"` @@ -1931,7 +1960,7 @@ type GetIpAccessListsResponse struct { // Get a network connectivity configuration type GetNetworkConnectivityConfigurationRequest struct { - // Your Network Connectvity Configuration ID. + // Your Network Connectivity Configuration ID. NetworkConnectivityConfigId string `json:"-" url:"-"` } @@ -1962,7 +1991,7 @@ func (s GetPersonalComputeSettingRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Get a private endpoint rule +// Gets a private endpoint rule type GetPrivateEndpointRuleRequest struct { // Your Network Connectvity Configuration ID. NetworkConnectivityConfigId string `json:"-" url:"-"` @@ -2057,6 +2086,7 @@ type ListIpAccessListResponse struct { IpAccessLists []IpAccessListInfo `json:"ip_access_lists,omitempty"` } +// The private endpoint rule list was successfully retrieved. type ListNccAzurePrivateEndpointRulesResponse struct { Items []NccAzurePrivateEndpointRule `json:"items,omitempty"` // A token that can be used to get the next page of results. If null, there @@ -2090,6 +2120,7 @@ func (s ListNetworkConnectivityConfigurationsRequest) MarshalJSON() ([]byte, err return marshal.Marshal(s) } +// The network connectivity configuration list was successfully retrieved. type ListNetworkConnectivityConfigurationsResponse struct { Items []NetworkConnectivityConfiguration `json:"items,omitempty"` // A token that can be used to get the next page of results. If null, there @@ -2268,20 +2299,21 @@ type NccAwsStableIpRule struct { CidrBlocks []string `json:"cidr_blocks,omitempty"` } +// Properties of the new private endpoint rule. Note that you must approve the +// endpoint in Azure portal after initialization. type NccAzurePrivateEndpointRule struct { // The current status of this private endpoint. The private endpoint rules - // are effective only if the connection state is `ESTABLISHED`. 
Remember - // that you must approve new endpoints on your resources in the Azure portal - // before they take effect. - // - // The possible values are: - INIT: (deprecated) The endpoint has been - // created and pending approval. - PENDING: The endpoint has been created - // and pending approval. - ESTABLISHED: The endpoint has been approved and - // is ready to use in your serverless compute resources. - REJECTED: - // Connection was rejected by the private link resource owner. - - // DISCONNECTED: Connection was removed by the private link resource owner, - // the private endpoint becomes informative and should be deleted for - // clean-up. + // are effective only if the connection state is ESTABLISHED. Remember that + // you must approve new endpoints on your resources in the Azure portal + // before they take effect. The possible values are: - INIT: (deprecated) + // The endpoint has been created and pending approval. - PENDING: The + // endpoint has been created and pending approval. - ESTABLISHED: The + // endpoint has been approved and is ready to use in your serverless compute + // resources. - REJECTED: Connection was rejected by the private link + // resource owner. - DISCONNECTED: Connection was removed by the private + // link resource owner, the private endpoint becomes informative and should + // be deleted for clean-up. - EXPIRED: If the endpoint was created but not + // approved in 14 days, it will be EXPIRED. ConnectionState NccAzurePrivateEndpointRuleConnectionState `json:"connection_state,omitempty"` // Time in epoch milliseconds when this object was created. CreationTime int64 `json:"creation_time,omitempty"` @@ -2289,12 +2321,20 @@ type NccAzurePrivateEndpointRule struct { Deactivated bool `json:"deactivated,omitempty"` // Time in epoch milliseconds when this object was deactivated. DeactivatedAt int64 `json:"deactivated_at,omitempty"` + // Only used by private endpoints to customer-managed resources. + // + // Domain names of target private link service. When updating this field, + // the full list of target domain_names must be specified. + DomainNames []string `json:"domain_names,omitempty"` // The name of the Azure private endpoint resource. EndpointName string `json:"endpoint_name,omitempty"` + // Only used by private endpoints to Azure first-party services. Enum: blob + // | dfs | sqlServer | mysqlServer + // // The sub-resource type (group ID) of the target resource. Note that to // connect to workspace root storage (root DBFS), you need two endpoints, - // one for `blob` and one for `dfs`. - GroupId NccAzurePrivateEndpointRuleGroupId `json:"group_id,omitempty"` + // one for blob and one for dfs. + GroupId string `json:"group_id,omitempty"` // The ID of a network connectivity configuration, which is the parent // resource of this private endpoint rule object. NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` @@ -2316,24 +2356,14 @@ func (s NccAzurePrivateEndpointRule) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The current status of this private endpoint. The private endpoint rules are -// effective only if the connection state is `ESTABLISHED`. Remember that you -// must approve new endpoints on your resources in the Azure portal before they -// take effect. -// -// The possible values are: - INIT: (deprecated) The endpoint has been created -// and pending approval. - PENDING: The endpoint has been created and pending -// approval. 
- ESTABLISHED: The endpoint has been approved and is ready to use -// in your serverless compute resources. - REJECTED: Connection was rejected by -// the private link resource owner. - DISCONNECTED: Connection was removed by -// the private link resource owner, the private endpoint becomes informative and -// should be deleted for clean-up. type NccAzurePrivateEndpointRuleConnectionState string const NccAzurePrivateEndpointRuleConnectionStateDisconnected NccAzurePrivateEndpointRuleConnectionState = `DISCONNECTED` const NccAzurePrivateEndpointRuleConnectionStateEstablished NccAzurePrivateEndpointRuleConnectionState = `ESTABLISHED` +const NccAzurePrivateEndpointRuleConnectionStateExpired NccAzurePrivateEndpointRuleConnectionState = `EXPIRED` + const NccAzurePrivateEndpointRuleConnectionStateInit NccAzurePrivateEndpointRuleConnectionState = `INIT` const NccAzurePrivateEndpointRuleConnectionStatePending NccAzurePrivateEndpointRuleConnectionState = `PENDING` @@ -2348,11 +2378,11 @@ func (f *NccAzurePrivateEndpointRuleConnectionState) String() string { // Set raw string value and validate it against allowed values func (f *NccAzurePrivateEndpointRuleConnectionState) Set(v string) error { switch v { - case `DISCONNECTED`, `ESTABLISHED`, `INIT`, `PENDING`, `REJECTED`: + case `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `INIT`, `PENDING`, `REJECTED`: *f = NccAzurePrivateEndpointRuleConnectionState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "INIT", "PENDING", "REJECTED"`, v) + return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "EXPIRED", "INIT", "PENDING", "REJECTED"`, v) } } @@ -2361,40 +2391,6 @@ func (f *NccAzurePrivateEndpointRuleConnectionState) Type() string { return "NccAzurePrivateEndpointRuleConnectionState" } -// The sub-resource type (group ID) of the target resource. Note that to connect -// to workspace root storage (root DBFS), you need two endpoints, one for `blob` -// and one for `dfs`. -type NccAzurePrivateEndpointRuleGroupId string - -const NccAzurePrivateEndpointRuleGroupIdBlob NccAzurePrivateEndpointRuleGroupId = `blob` - -const NccAzurePrivateEndpointRuleGroupIdDfs NccAzurePrivateEndpointRuleGroupId = `dfs` - -const NccAzurePrivateEndpointRuleGroupIdMysqlServer NccAzurePrivateEndpointRuleGroupId = `mysqlServer` - -const NccAzurePrivateEndpointRuleGroupIdSqlServer NccAzurePrivateEndpointRuleGroupId = `sqlServer` - -// String representation for [fmt.Print] -func (f *NccAzurePrivateEndpointRuleGroupId) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *NccAzurePrivateEndpointRuleGroupId) Set(v string) error { - switch v { - case `blob`, `dfs`, `mysqlServer`, `sqlServer`: - *f = NccAzurePrivateEndpointRuleGroupId(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v) - } -} - -// Type always returns NccAzurePrivateEndpointRuleGroupId to satisfy [pflag.Value] interface -func (f *NccAzurePrivateEndpointRuleGroupId) Type() string { - return "NccAzurePrivateEndpointRuleGroupId" -} - // The stable Azure service endpoints. You can configure the firewall of your // Azure resources to allow traffic from your Databricks serverless compute // resources. @@ -2402,10 +2398,10 @@ type NccAzureServiceEndpointRule struct { // The list of subnets from which Databricks network traffic originates when // accessing your Azure resources. 
Subnets []string `json:"subnets,omitempty"` - // The Azure region in which this service endpoint rule applies. + // The Azure region in which this service endpoint rule applies. TargetRegion string `json:"target_region,omitempty"` // The Azure services to which this service endpoint rule applies to. - TargetServices []string `json:"target_services,omitempty"` + TargetServices []EgressResourceType `json:"target_services,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -2418,8 +2414,6 @@ func (s NccAzureServiceEndpointRule) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The network connectivity rules that apply to network traffic from your -// serverless compute resources. type NccEgressConfig struct { // The network connectivity rules that are applied by default without // resource specific configurations. You can find the stable network @@ -2430,9 +2424,7 @@ type NccEgressConfig struct { TargetRules *NccEgressTargetRules `json:"target_rules,omitempty"` } -// The network connectivity rules that are applied by default without resource -// specific configurations. You can find the stable network information of your -// serverless compute resources here. +// Default rules don't have specific targets. type NccEgressDefaultRules struct { // The stable AWS IP CIDR blocks. You can use these to configure the // firewall of your resources to allow traffic from your Databricks @@ -2444,12 +2436,13 @@ type NccEgressDefaultRules struct { AzureServiceEndpointRule *NccAzureServiceEndpointRule `json:"azure_service_endpoint_rule,omitempty"` } -// The network connectivity rules that configured for each destinations. These -// rules override default rules. +// Target rules control the egress rules that are dedicated to specific +// resources. type NccEgressTargetRules struct { AzurePrivateEndpointRules []NccAzurePrivateEndpointRule `json:"azure_private_endpoint_rules,omitempty"` } +// Properties of the new network connectivity configuration. type NetworkConnectivityConfiguration struct { // The Databricks account ID that hosts the credential. AccountId string `json:"account_id,omitempty"` @@ -2461,7 +2454,7 @@ type NetworkConnectivityConfiguration struct { // The name of the network connectivity configuration. The name can contain // alphanumeric characters, hyphens, and underscores. The length must be // between 3 and 30 characters. The name must match the regular expression - `^[0-9a-zA-Z-_]{3,30}$`. + // ^[0-9a-zA-Z-_]{3,30}$ Name string `json:"name,omitempty"` // Databricks network connectivity configuration ID. NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` @@ -3306,6 +3299,24 @@ func (s UpdateIpAccessList) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Update a private endpoint rule +type UpdateNccAzurePrivateEndpointRulePublicRequest struct { + // Your Network Connectivity Configuration ID. + NetworkConnectivityConfigId string `json:"-" url:"-"` + // Properties of the new private endpoint rule. Note that you must approve + // the endpoint in Azure portal after initialization. + PrivateEndpointRule UpdatePrivateEndpointRule `json:"private_endpoint_rule"` + // Your private endpoint rule ID. + PrivateEndpointRuleId string `json:"-" url:"-"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`).
+ // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + UpdateMask string `json:"-" url:"update_mask"` +} + type UpdateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. @@ -3347,6 +3358,16 @@ type UpdatePersonalComputeSettingRequest struct { Setting PersonalComputeSetting `json:"setting"` } +// Properties of the new private endpoint rule. Note that you must approve the +// endpoint in Azure portal after initialization. +type UpdatePrivateEndpointRule struct { + // Only used by private endpoints to customer-managed resources. + // + // Domain names of target private link service. When updating this field, + // the full list of target domain_names must be specified. + DomainNames []string `json:"domain_names,omitempty"` +} + type UpdateResponse struct { } diff --git a/service/sql/api.go b/service/sql/api.go index 409dc26c3..a41db187c 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Alerts, Alerts Legacy, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Redash Config, Statement Execution, Warehouses, etc. +// These APIs allow you to manage Alerts, Alerts Legacy, Alerts V2, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Redash Config, Statement Execution, Warehouses, etc. package sql import ( @@ -379,6 +379,161 @@ func (a *AlertsLegacyAPI) GetByName(ctx context.Context, name string) (*LegacyAl return &alternatives[0], nil } +type AlertsV2Interface interface { + + // Create an alert. + // + // Creates an alert. + CreateAlert(ctx context.Context, request CreateAlertV2Request) (*AlertV2, error) + + // Get an alert. + // + // Gets an alert. + GetAlert(ctx context.Context, request GetAlertV2Request) (*AlertV2, error) + + // Get an alert. + // + // Gets an alert. + GetAlertById(ctx context.Context, id string) (*AlertV2, error) + + // List alerts. + // + // Gets a list of alerts accessible to the user, ordered by creation time. + // + // This method is generated by Databricks SDK Code Generator. + ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] + + // List alerts. + // + // Gets a list of alerts accessible to the user, ordered by creation time. + // + // This method is generated by Databricks SDK Code Generator. + ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) + + // ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. + // + // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // + // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator.
+ ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) + + // GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. + // + // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // + // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from list + // views, and can no longer trigger. You can restore a trashed alert through the + // UI. A trashed alert is permanently deleted after 30 days. + TrashAlert(ctx context.Context, request TrashAlertV2Request) error + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from list + // views, and can no longer trigger. You can restore a trashed alert through the + // UI. A trashed alert is permanently deleted after 30 days. + TrashAlertById(ctx context.Context, id string) error + + // Update an alert. + // + // Updates an alert. + UpdateAlert(ctx context.Context, request UpdateAlertV2Request) (*AlertV2, error) +} + +func NewAlertsV2(client *client.DatabricksClient) *AlertsV2API { + return &AlertsV2API{ + alertsV2Impl: alertsV2Impl{ + client: client, + }, + } +} + +// The alerts v2 API can be used to perform CRUD operations on alerts. +type AlertsV2API struct { + alertsV2Impl +} + +// Get an alert. +// +// Gets an alert. +func (a *AlertsV2API) GetAlertById(ctx context.Context, id string) (*AlertV2, error) { + return a.alertsV2Impl.GetAlert(ctx, GetAlertV2Request{ + Id: id, + }) +} + +// ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. +// +// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. +// +// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AlertsV2API) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAlertsAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. +// +// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. +// +// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator.
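+//
+// A minimal usage sketch (illustrative only; assumes a configured
+// *databricks.WorkspaceClient `w` and a placeholder display name):
+//
+//	alert, err := w.AlertsV2.GetByDisplayName(ctx, "my-alert")
+//	if err != nil {
+//		return err
+//	}
+//	fmt.Println(alert.Id)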
+func (a *AlertsV2API) GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAlertsAll(ctx, ListAlertsV2Request{}) + if err != nil { + return nil, err + } + tmp := map[string][]ListAlertsV2ResponseAlert{} + for _, v := range result { + key := v.DisplayName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ListAlertsV2ResponseAlert named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ListAlertsV2ResponseAlert named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// Delete an alert. +// +// Moves an alert to the trash. Trashed alerts immediately disappear from list +// views, and can no longer trigger. You can restore a trashed alert through the +// UI. A trashed alert is permanently deleted after 30 days. +func (a *AlertsV2API) TrashAlertById(ctx context.Context, id string) error { + return a.alertsV2Impl.TrashAlert(ctx, TrashAlertV2Request{ + Id: id, + }) +} + type DashboardWidgetsInterface interface { // Add widget to a dashboard. diff --git a/service/sql/impl.go b/service/sql/impl.go index e5f404388..c43cd5e31 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -165,6 +165,98 @@ func (a *alertsLegacyImpl) Update(ctx context.Context, request EditAlert) error return err } +// unexported type that holds implementations of just AlertsV2 API methods +type alertsV2Impl struct { + client *client.DatabricksClient +} + +func (a *alertsV2Impl) CreateAlert(ctx context.Context, request CreateAlertV2Request) (*AlertV2, error) { + var alertV2 AlertV2 + path := "/api/2.0/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &alertV2) + return &alertV2, err +} + +func (a *alertsV2Impl) GetAlert(ctx context.Context, request GetAlertV2Request) (*AlertV2, error) { + var alertV2 AlertV2 + path := fmt.Sprintf("/api/2.0/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &alertV2) + return &alertV2, err +} + +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. +func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] { + + getNextPage := func(ctx context.Context, req ListAlertsV2Request) (*ListAlertsV2Response, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListAlerts(ctx, req) + } + getItems := func(resp *ListAlertsV2Response) []ListAlertsV2ResponseAlert { + return resp.Results + } + getNextReq := func(resp *ListAlertsV2Response) *ListAlertsV2Request { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. 
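+//
+// A minimal usage sketch via the workspace client (illustrative only; assumes
+// a configured *databricks.WorkspaceClient `w`):
+//
+//	alerts, err := w.AlertsV2.ListAlertsAll(ctx, sql.ListAlertsV2Request{})
+//	if err != nil {
+//		return err
+//	}
+//	for _, alert := range alerts {
+//		fmt.Println(alert.Id, alert.DisplayName)
+//	}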
+func (a *alertsV2Impl) ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) { + iterator := a.ListAlerts(ctx, request) + return listing.ToSlice[ListAlertsV2ResponseAlert](ctx, iterator) +} + +func (a *alertsV2Impl) internalListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) { + var listAlertsV2Response ListAlertsV2Response + path := "/api/2.0/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAlertsV2Response) + return &listAlertsV2Response, err +} + +func (a *alertsV2Impl) TrashAlert(ctx context.Context, request TrashAlertV2Request) error { + var empty Empty + path := fmt.Sprintf("/api/2.0/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) + return err +} + +func (a *alertsV2Impl) UpdateAlert(ctx context.Context, request UpdateAlertV2Request) (*AlertV2, error) { + var alertV2 AlertV2 + path := fmt.Sprintf("/api/2.0/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &alertV2) + return &alertV2, err +} + // unexported type that holds implementations of just DashboardWidgets API methods type dashboardWidgetsImpl struct { client *client.DatabricksClient diff --git a/service/sql/interface.go b/service/sql/interface.go index 52301cdeb..5fcef16a6 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -113,6 +113,39 @@ type AlertsLegacyService interface { Update(ctx context.Context, request EditAlert) error } +// TODO: Add description +type AlertsV2Service interface { + + // Create an alert. + // + // Create Alert + CreateAlert(ctx context.Context, request CreateAlertV2Request) (*AlertV2, error) + + // Get an alert. + // + // Gets an alert. + GetAlert(ctx context.Context, request GetAlertV2Request) (*AlertV2, error) + + // List alerts. + // + // Gets a list of alerts accessible to the user, ordered by creation time. + // + // Use ListAlertsAll() to get all ListAlertsV2ResponseAlert instances, which will iterate over every result page. + ListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from + // list views, and can no longer trigger. You can restore a trashed alert + // through the UI. A trashed alert is permanently deleted after 30 days. + TrashAlert(ctx context.Context, request TrashAlertV2Request) error + + // Update an alert. + // + // Update alert + UpdateAlert(ctx context.Context, request UpdateAlertV2Request) (*AlertV2, error) +} + // This is an evolving API that facilitates the addition and removal of widgets // from existing dashboards within the Databricks Workspace. Data structures may // change over time. 
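The convenience wrappers above (GetByDisplayName, TrashAlertById) compose into a short end-to-end flow. A minimal sketch of a consumer, assuming default environment-based authentication; the alert display name is purely illustrative, everything else is the generated API shown above:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}
	// Resolve an alert by display name. All pages are loaded into memory
	// first, and an error is returned if the name is ambiguous.
	alert, err := w.AlertsV2.GetByDisplayName(ctx, "Nightly revenue check")
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("trashing alert", alert.Id)
	// Move the alert to the trash; it can be restored from the UI and is
	// permanently deleted after 30 days.
	if err := w.AlertsV2.TrashAlertById(ctx, alert.Id); err != nil {
		log.Fatal(err)
	}
}
```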
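The ListAlerts iterator in impl.go hides the page_token bookkeeping. A sketch of both consumption styles, reusing w and ctx from the previous snippet and importing service/sql as sql:

```go
// Lazy: fetch pages of 50 as the iterator is drained. HasNext issues the
// next HTTP request only once the current page is exhausted.
it := w.AlertsV2.ListAlerts(ctx, sql.ListAlertsV2Request{PageSize: 50})
for it.HasNext(ctx) {
	a, err := it.Next(ctx)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%s\t%s\n", a.Id, a.DisplayName)
}

// Eager: ListAlertsAll drains the same iterator into a slice, so very
// large workspaces pay the full memory cost up front.
all, err := w.AlertsV2.ListAlertsAll(ctx, sql.ListAlertsV2Request{})
if err != nil {
	log.Fatal(err)
}
fmt.Println("total alerts:", len(all))
```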
diff --git a/service/sql/model.go b/service/sql/model.go
index a74f2de93..9eca5ef2b 100755
--- a/service/sql/model.go
+++ b/service/sql/model.go
@@ -27,6 +27,45 @@ func (s AccessControl) MarshalJSON() ([]byte, error) {
 	return marshal.Marshal(s)
 }
 
+type Aggregation string
+
+const AggregationAvg Aggregation = `AVG`
+
+const AggregationCount Aggregation = `COUNT`
+
+const AggregationCountDistinct Aggregation = `COUNT_DISTINCT`
+
+const AggregationMax Aggregation = `MAX`
+
+const AggregationMedian Aggregation = `MEDIAN`
+
+const AggregationMin Aggregation = `MIN`
+
+const AggregationStddev Aggregation = `STDDEV`
+
+const AggregationSum Aggregation = `SUM`
+
+// String representation for [fmt.Print]
+func (f *Aggregation) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *Aggregation) Set(v string) error {
+	switch v {
+	case `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM`:
+		*f = Aggregation(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "AVG", "COUNT", "COUNT_DISTINCT", "MAX", "MEDIAN", "MIN", "STDDEV", "SUM"`, v)
+	}
+}
+
+// Type always returns Aggregation to satisfy [pflag.Value] interface
+func (f *Aggregation) Type() string {
+	return "Aggregation"
+}
+
 type Alert struct {
 	// Trigger conditions of the alert.
 	Condition *AlertCondition `json:"condition,omitempty"`
@@ -103,6 +142,40 @@ type AlertConditionThreshold struct {
 	Value *AlertOperandValue `json:"value,omitempty"`
 }
 
+// UNSPECIFIED - default unspecified value for the proto enum; do not use it in
+// code. UNKNOWN - alert not yet evaluated. TRIGGERED - alert is triggered. OK -
+// alert is not triggered. ERROR - alert evaluation failed.
+type AlertEvaluationState string
+
+const AlertEvaluationStateError AlertEvaluationState = `ERROR`
+
+const AlertEvaluationStateOk AlertEvaluationState = `OK`
+
+const AlertEvaluationStateTriggered AlertEvaluationState = `TRIGGERED`
+
+const AlertEvaluationStateUnknown AlertEvaluationState = `UNKNOWN`
+
+// String representation for [fmt.Print]
+func (f *AlertEvaluationState) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *AlertEvaluationState) Set(v string) error {
+	switch v {
+	case `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN`:
+		*f = AlertEvaluationState(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "ERROR", "OK", "TRIGGERED", "UNKNOWN"`, v)
+	}
+}
+
+// Type always returns AlertEvaluationState to satisfy [pflag.Value] interface
+func (f *AlertEvaluationState) Type() string {
+	return "AlertEvaluationState"
+}
+
 type AlertOperandColumn struct {
 	Name string `json:"name,omitempty"`
 
@@ -321,6 +394,156 @@ func (f *AlertState) Type() string {
 	return "AlertState"
 }
 
+type AlertV2 struct {
+	// The timestamp indicating when the alert was created.
+	CreateTime string `json:"create_time,omitempty"`
+	// Custom description for the alert. Supports mustache templates.
+	CustomDescription string `json:"custom_description,omitempty"`
+	// Custom summary for the alert. Supports mustache templates.
+	CustomSummary string `json:"custom_summary,omitempty"`
+	// The display name of the alert.
+	DisplayName string `json:"display_name,omitempty"`
+
+	Evaluation *AlertV2Evaluation `json:"evaluation,omitempty"`
+	// UUID identifying the alert.
+	Id string `json:"id,omitempty"`
+	// Indicates whether the alert is trashed.
+	LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
+	// The owner's username. This field is set to "Unavailable" if the user has
+	// been deleted.
+	OwnerUserName string `json:"owner_user_name,omitempty"`
+	// The workspace path of the folder containing the alert. Can only be set on
+	// create, and cannot be updated.
+	ParentPath string `json:"parent_path,omitempty"`
+	// Text of the query to be run.
+	QueryText string `json:"query_text,omitempty"`
+	// The run as username. This field is set to "Unavailable" if the user has
+	// been deleted.
+	RunAsUserName string `json:"run_as_user_name,omitempty"`
+
+	Schedule *CronSchedule `json:"schedule,omitempty"`
+	// The timestamp indicating when the alert was updated.
+	UpdateTime string `json:"update_time,omitempty"`
+	// ID of the SQL warehouse attached to the alert.
+	WarehouseId string `json:"warehouse_id,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *AlertV2) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AlertV2) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type AlertV2Evaluation struct {
+	// Operator used for comparison in alert evaluation.
+	ComparisonOperator ComparisonOperator `json:"comparison_operator,omitempty"`
+	// Alert state if the result is empty.
+	EmptyResultState AlertEvaluationState `json:"empty_result_state,omitempty"`
+	// Timestamp of the last evaluation.
+	LastEvaluatedAt string `json:"last_evaluated_at,omitempty"`
+	// User or notification destination to notify when the alert is triggered.
+	Notification *AlertV2Notification `json:"notification,omitempty"`
+	// Source column from the result to use to evaluate the alert.
+	Source *AlertV2OperandColumn `json:"source,omitempty"`
+	// Latest state of alert evaluation.
+	State AlertEvaluationState `json:"state,omitempty"`
+	// Threshold to use for alert evaluation; can be a column or a value.
+	Threshold *AlertV2Operand `json:"threshold,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *AlertV2Evaluation) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s AlertV2Evaluation) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type AlertV2Notification struct {
+	// Whether to notify alert subscribers when the alert returns to normal.
+	NotifyOnOk bool `json:"notify_on_ok,omitempty"`
+	// Number of seconds an alert must wait after being triggered to rearm
+	// itself. After rearming, it can be triggered again. If 0 or not specified,
+	// the alert will not be triggered again.
+ RetriggerSeconds int `json:"retrigger_seconds,omitempty"` + + Subscriptions []AlertV2Subscription `json:"subscriptions,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2Notification) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2Notification) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2Operand struct { + Column *AlertV2OperandColumn `json:"column,omitempty"` + + Value *AlertV2OperandValue `json:"value,omitempty"` +} + +type AlertV2OperandColumn struct { + Aggregation Aggregation `json:"aggregation,omitempty"` + + Display string `json:"display,omitempty"` + + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2OperandColumn) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2OperandColumn) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2OperandValue struct { + BoolValue bool `json:"bool_value,omitempty"` + + DoubleValue float64 `json:"double_value,omitempty"` + + StringValue string `json:"string_value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2OperandValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2OperandValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2Subscription struct { + DestinationId string `json:"destination_id,omitempty"` + + UserEmail string `json:"user_email,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2Subscription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2Subscription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Describes metadata for a particular chunk, within a result set; this // structure is used both within a manifest, and when fetching individual chunk // data or links. @@ -548,6 +771,45 @@ func (f *ColumnInfoTypeName) Type() string { return "ColumnInfoTypeName" } +type ComparisonOperator string + +const ComparisonOperatorEqual ComparisonOperator = `EQUAL` + +const ComparisonOperatorGreaterThan ComparisonOperator = `GREATER_THAN` + +const ComparisonOperatorGreaterThanOrEqual ComparisonOperator = `GREATER_THAN_OR_EQUAL` + +const ComparisonOperatorIsNotNull ComparisonOperator = `IS_NOT_NULL` + +const ComparisonOperatorIsNull ComparisonOperator = `IS_NULL` + +const ComparisonOperatorLessThan ComparisonOperator = `LESS_THAN` + +const ComparisonOperatorLessThanOrEqual ComparisonOperator = `LESS_THAN_OR_EQUAL` + +const ComparisonOperatorNotEqual ComparisonOperator = `NOT_EQUAL` + +// String representation for [fmt.Print] +func (f *ComparisonOperator) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ComparisonOperator) Set(v string) error { + switch v { + case `EQUAL`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `IS_NOT_NULL`, `IS_NULL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL`: + *f = ComparisonOperator(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL", "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "IS_NOT_NULL", "IS_NULL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "NOT_EQUAL"`, v) + } +} + +// Type always returns ComparisonOperator to satisfy [pflag.Value] interface +func (f *ComparisonOperator) Type() string { + return "ComparisonOperator" +} + type CreateAlert struct { // Name of the alert. 
Name string `json:"name"`
@@ -575,6 +837,20 @@ func (s CreateAlert) MarshalJSON() ([]byte, error) {
 
 type CreateAlertRequest struct {
 	Alert *CreateAlertRequestAlert `json:"alert,omitempty"`
+	// If true, automatically resolve alert display name conflicts. Otherwise,
+	// fail the request if the alert's display name conflicts with an existing
+	// alert's display name.
+	AutoResolveDisplayName bool `json:"auto_resolve_display_name,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *CreateAlertRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateAlertRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
 }
 
 type CreateAlertRequestAlert struct {
@@ -615,8 +891,27 @@ func (s CreateAlertRequestAlert) MarshalJSON() ([]byte, error) {
 	return marshal.Marshal(s)
 }
 
+type CreateAlertV2Request struct {
+	Alert *AlertV2 `json:"alert,omitempty"`
+}
+
 type CreateQueryRequest struct {
+	// If true, automatically resolve query display name conflicts. Otherwise,
+	// fail the request if the query's display name conflicts with an existing
+	// query's display name.
+	AutoResolveDisplayName bool `json:"auto_resolve_display_name,omitempty"`
+
 	Query *CreateQueryRequestQuery `json:"query,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *CreateQueryRequest) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CreateQueryRequest) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
 }
 
 type CreateQueryRequestQuery struct {
@@ -863,6 +1158,31 @@ func (s CreateWidget) MarshalJSON() ([]byte, error) {
 	return marshal.Marshal(s)
 }
 
+type CronSchedule struct {
+	// Indicates whether this schedule is paused or not.
+	PauseStatus SchedulePauseStatus `json:"pause_status,omitempty"`
+	// A cron expression using quartz syntax that specifies the schedule for
+	// this alert. Should use the quartz format described here:
+	// http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html
+	QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"`
+	// A Java timezone id. The schedule will be resolved using this timezone.
+	// This will be combined with the quartz_cron_schedule to determine the
+	// schedule. See
+	// https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html
+	// for details.
+	TimezoneId string `json:"timezone_id,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *CronSchedule) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s CronSchedule) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
 // A JSON representing a dashboard containing widgets of visualizations and text
 // boxes.
 type Dashboard struct {
@@ -1496,7 +1816,7 @@ type EndpointInfo struct {
 	// Supported values: - Must be unique within an org. - Must be less than 100
 	// characters.
 	Name string `json:"name,omitempty"`
-	// current number of active sessions for the warehouse
+	// Deprecated. Current number of active sessions for the warehouse
 	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
 	// current number of clusters running for the service
 	NumClusters int `json:"num_clusters,omitempty"`
@@ -1892,6 +2212,11 @@ type GetAlertRequest struct {
 	Id string `json:"-" url:"-"`
 }
 
+// Get an alert
+type GetAlertV2Request struct {
+	Id string `json:"-" url:"-"`
+}
+
 // Get an alert
 type GetAlertsLegacyRequest struct {
 	AlertId string `json:"-" url:"-"`
@@ -2032,7 +2357,7 @@ type GetWarehouseResponse struct {
 	// Supported values: - Must be unique within an org. - Must be less than 100
 	// characters.
 	Name string `json:"name,omitempty"`
-	// current number of active sessions for the warehouse
+	// Deprecated. Current number of active sessions for the warehouse
 	NumActiveSessions int64 `json:"num_active_sessions,omitempty"`
 	// current number of clusters running for the service
 	NumClusters int `json:"num_clusters,omitempty"`
@@ -2466,6 +2791,80 @@ func (s ListAlertsResponseAlert) MarshalJSON() ([]byte, error) {
 	return marshal.Marshal(s)
 }
 
+// List alerts
+type ListAlertsV2Request struct {
+	PageSize int `json:"-" url:"page_size,omitempty"`
+
+	PageToken string `json:"-" url:"page_token,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *ListAlertsV2Request) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListAlertsV2Request) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListAlertsV2Response struct {
+	NextPageToken string `json:"next_page_token,omitempty"`
+
+	Results []ListAlertsV2ResponseAlert `json:"results,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *ListAlertsV2Response) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s ListAlertsV2Response) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type ListAlertsV2ResponseAlert struct {
+	// The timestamp indicating when the alert was created.
+	CreateTime string `json:"create_time,omitempty"`
+	// Custom description for the alert. Supports mustache templates.
+	CustomDescription string `json:"custom_description,omitempty"`
+	// Custom summary for the alert. Supports mustache templates.
+	CustomSummary string `json:"custom_summary,omitempty"`
+	// The display name of the alert.
+	DisplayName string `json:"display_name,omitempty"`
+
+	Evaluation *AlertV2Evaluation `json:"evaluation,omitempty"`
+	// UUID identifying the alert.
+	Id string `json:"id,omitempty"`
+	// Indicates whether the alert is trashed.
+	LifecycleState LifecycleState `json:"lifecycle_state,omitempty"`
+	// The owner's username. This field is set to "Unavailable" if the user has
+	// been deleted.
+	OwnerUserName string `json:"owner_user_name,omitempty"`
+	// Text of the query to be run.
+	QueryText string `json:"query_text,omitempty"`
+	// The run as username. This field is set to "Unavailable" if the user has
+	// been deleted.
+	RunAsUserName string `json:"run_as_user_name,omitempty"`
+
+	Schedule *CronSchedule `json:"schedule,omitempty"`
+	// The timestamp indicating when the alert was updated.
+	UpdateTime string `json:"update_time,omitempty"`
+	// ID of the SQL warehouse attached to the alert.
+ WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ListAlertsV2ResponseAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAlertsV2ResponseAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get dashboard objects type ListDashboardsRequest struct { // Name of dashboard attribute to order by. @@ -3684,6 +4083,33 @@ func (f *RunAsRole) Type() string { return "RunAsRole" } +type SchedulePauseStatus string + +const SchedulePauseStatusPaused SchedulePauseStatus = `PAUSED` + +const SchedulePauseStatusUnpaused SchedulePauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *SchedulePauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SchedulePauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = SchedulePauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns SchedulePauseStatus to satisfy [pflag.Value] interface +func (f *SchedulePauseStatus) Type() string { + return "SchedulePauseStatus" +} + type ServiceError struct { ErrorCode ServiceErrorCode `json:"error_code,omitempty"` // A brief summary of the error condition. @@ -4402,6 +4828,11 @@ type TrashAlertRequest struct { Id string `json:"-" url:"-"` } +// Delete an alert +type TrashAlertV2Request struct { + Id string `json:"-" url:"-"` +} + // Delete a query type TrashQueryRequest struct { Id string `json:"-" url:"-"` @@ -4464,6 +4895,24 @@ func (s UpdateAlertRequestAlert) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type UpdateAlertV2Request struct { + Alert *AlertV2 `json:"alert,omitempty"` + // UUID identifying the alert. + Id string `json:"-" url:"-"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + UpdateMask string `json:"update_mask"` +} + type UpdateQueryRequest struct { Id string `json:"-" url:"-"` diff --git a/workspace_client.go b/workspace_client.go index 13cd2134f..a66fc8983 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -62,6 +62,9 @@ type WorkspaceClient struct { // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html AlertsLegacy sql.AlertsLegacyInterface + // TODO: Add description + AlertsV2 sql.AlertsV2Interface + // Apps run directly on a customer’s Databricks instance, integrate with // their data, use and extend Databricks services, and enable users to // interact through single sign-on. 
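The model types above compose into a complete alert definition. A sketch of creating one through the new service, reusing w and ctx from the earlier snippets; the display name, query, warehouse ID, and e-mail address are placeholders:

```go
created, err := w.AlertsV2.CreateAlert(ctx, sql.CreateAlertV2Request{
	Alert: &sql.AlertV2{
		DisplayName: "Error rate too high",                      // illustrative
		QueryText:   "SELECT count(*) AS errors FROM demo.logs", // illustrative
		WarehouseId: "1234567890abcdef",                         // placeholder
		Evaluation: &sql.AlertV2Evaluation{
			// Trigger when SUM(errors) > 100.
			ComparisonOperator: sql.ComparisonOperatorGreaterThan,
			Source: &sql.AlertV2OperandColumn{
				Name:        "errors",
				Aggregation: sql.AggregationSum,
			},
			Threshold: &sql.AlertV2Operand{
				Value: &sql.AlertV2OperandValue{DoubleValue: 100},
			},
			// Treat an empty result as healthy rather than as an error.
			EmptyResultState: sql.AlertEvaluationStateOk,
			Notification: &sql.AlertV2Notification{
				NotifyOnOk: true,
				// Re-arm after an hour so the alert can fire again.
				RetriggerSeconds: 3600,
				Subscriptions: []sql.AlertV2Subscription{
					{UserEmail: "oncall@example.com"}, // placeholder
				},
			},
		},
		Schedule: &sql.CronSchedule{
			// Quartz syntax: evaluate every 15 minutes.
			QuartzCronSchedule: "0 0/15 * * * ?",
			TimezoneId:         "UTC",
			PauseStatus:        sql.SchedulePauseStatusUnpaused,
		},
	},
})
if err != nil {
	log.Fatal(err)
}
```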
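UpdateAlertV2Request pairs the patch body with a required update mask, so only the named paths change. Continuing the sketch, and assuming the service accepts dotted sub-field paths as the field's doc comment describes:

```go
// Rename the alert and pause its schedule; every other field on the
// server is left untouched because it is absent from the mask.
updated, err := w.AlertsV2.UpdateAlert(ctx, sql.UpdateAlertV2Request{
	Id: created.Id,
	Alert: &sql.AlertV2{
		DisplayName: "Error rate too high (prod)",
		Schedule:    &sql.CronSchedule{PauseStatus: sql.SchedulePauseStatusPaused},
	},
	// Comma-separated, no spaces; dots descend into sub-fields.
	UpdateMask: "display_name,schedule.pause_status",
})
if err != nil {
	log.Fatal(err)
}
fmt.Println("updated at", updated.UpdateTime)
```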
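Each new enum (Aggregation, AlertEvaluationState, ComparisonOperator, SchedulePauseStatus) implements String, Set, and Type, which is the spf13/pflag Value contract that the generated comments reference, so the types bind directly to command-line flags. A self-contained sketch, assuming github.com/spf13/pflag is available:

```go
package main

import (
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go/service/sql"
	"github.com/spf13/pflag"
)

func main() {
	var agg sql.Aggregation
	fs := pflag.NewFlagSet("alerts", pflag.ContinueOnError)
	// Set validates the raw string, so bad values fail at parse time.
	fs.Var(&agg, "aggregation", "one of AVG, COUNT, COUNT_DISTINCT, MAX, MEDIAN, MIN, STDDEV, SUM")
	if err := fs.Parse([]string{"--aggregation", "SUM"}); err != nil {
		log.Fatal(err)
	}
	fmt.Println("parsed:", agg) // parsed: SUM
}
```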
@@ -1176,6 +1179,7 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { AccountAccessControlProxy: iam.NewAccountAccessControlProxy(databricksClient), Alerts: sql.NewAlerts(databricksClient), AlertsLegacy: sql.NewAlertsLegacy(databricksClient), + AlertsV2: sql.NewAlertsV2(databricksClient), Apps: apps.NewApps(databricksClient), ArtifactAllowlists: catalog.NewArtifactAllowlists(databricksClient), Catalogs: catalog.NewCatalogs(databricksClient), From b4148e33554c0d5d5f6c306f9a7b3cfb2a3219c6 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 08:26:01 +0000 Subject: [PATCH 2/3] changelog --- NEXT_CHANGELOG.md | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index e9d122be8..0c210415e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -31,3 +31,15 @@ * [Breaking] Removed `GroupId` and `ResourceId` fields for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). * [Breaking] Removed `Large`, `Medium` and `Small` enum values for [serving.ServedModelInputWorkloadSize](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInputWorkloadSize). * [Breaking] Removed `Blob`, `Dfs`, `MysqlServer` and `SqlServer` enum values for [settings.NccAzurePrivateEndpointRuleGroupId](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleGroupId). +* [Breaking] Field `AppDeployment` of `CreateAppDeploymentRequest` is changed from `*AppDeployment` to `AppDeployment`. +* [Breaking] Field `App` of `CreateAppRequest` is changed from `*App` to `App`. +* [Breaking] Field `App` of `UpdateAppRequest` is changed from `*App` to `App`. +* [Breaking] Field `BudgetPolicy` of `UpdateBudgetPolicyRequest` is changed from `*BudgetPolicy` to `BudgetPolicy`. +* [Breaking] Field `OnlineTable` of `CreateOnlineTableRequest` is changed from `*OnlineTable` to `OnlineTable`. +* [Breaking] Field `CleanRoomAsset` of `CreateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. +* [Breaking] Field `CleanRoom` of `CreateCleanRoomRequest` is changed from `*CleanRoom` to `CleanRoom`. +* [Breaking] Field `CleanRoomAsset` of `UpdateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. +* [Breaking] Field `Dashboard` of `CreateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. +* [Breaking] Field `Schedule` of `CreateScheduleRequest` is changed from `*Schedule` to `Schedule`. +* [Breaking] Field `Subscription` of `CreateSubscriptionRequest` is changed from `*Subscription` to `Subscription`. +* [Breaking] Field `Dashboard` of `UpdateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. 
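The changelog entries above describe a mechanical migration: request wrappers now embed their payloads by value instead of by pointer. The third commit below applies it to the federation-policy tests; any other call site rewrites the same way, as in this hypothetical fragment:

```go
// Before this change, the payload hung off a pointer:
//
//	req := oauth2.CreateServicePrincipalFederationPolicyRequest{
//		Policy: &oauth2.FederationPolicy{ /* ... */ },
//	}
//
// After it, drop the &: the field is a value, so an all-zero struct now
// means "empty policy" rather than "no policy".
req := oauth2.CreateServicePrincipalFederationPolicyRequest{
	Policy: oauth2.FederationPolicy{
		OidcPolicy: &oauth2.OidcFederationPolicy{
			Issuer: "https://token.actions.githubusercontent.com",
		},
	},
}
```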
From 1fa9c0390eae37bb14a1cc63aba39e05eab9e6be Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 09:03:52 +0000 Subject: [PATCH 3/3] fix --- internal/auth_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/auth_test.go b/internal/auth_test.go index ef12d40a5..e4da34c1b 100644 --- a/internal/auth_test.go +++ b/internal/auth_test.go @@ -35,7 +35,7 @@ func TestUcAccWifAuth(t *testing.T) { // Setup Federation Policy p, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{ - Policy: &oauth2.FederationPolicy{ + Policy: oauth2.FederationPolicy{ OidcPolicy: &oauth2.OidcFederationPolicy{ Issuer: "https://token.actions.githubusercontent.com", Audiences: []string{ @@ -117,7 +117,7 @@ func TestUcAccWifAuthWorkspace(t *testing.T) { // Setup Federation Policy p, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{ - Policy: &oauth2.FederationPolicy{ + Policy: oauth2.FederationPolicy{ OidcPolicy: &oauth2.OidcFederationPolicy{ Issuer: "https://token.actions.githubusercontent.com", Audiences: []string{