diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha
index 2924d5d6d..26ece1bc5 100644
--- a/.codegen/_openapi_sha
+++ b/.codegen/_openapi_sha
@@ -1 +1 @@
-31b3fea21dbe5a3a652937691602eb66d6dba30b
\ No newline at end of file
+05692f4dcf168be190bb7bcda725ee8b368b7ae3
\ No newline at end of file
diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md
index 2fdb85ae0..68b9d1c6e 100644
--- a/NEXT_CHANGELOG.md
+++ b/NEXT_CHANGELOG.md
@@ -11,3 +11,29 @@
 ### Internal Changes
 
 ### API Changes
+* Added [w.EnableExportNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableExportNotebookAPI) workspace-level service, [w.EnableNotebookTableClipboard](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableNotebookTableClipboardAPI) workspace-level service and [w.EnableResultsDownloading](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableResultsDownloadingAPI) workspace-level service.
+* Added `GetCredentialsForTraceDataDownload` and `GetCredentialsForTraceDataUpload` methods for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service.
+* Added `GetDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service.
+* Added `GetPublishedDashboardTokenInfo` method for [w.LakeviewEmbedded](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewEmbeddedAPI) workspace-level service.
+* Added `BindingWorkspaceIds` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy).
+* Added `DownloadId` field for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse).
+* Added `DashboardOutput` field for [jobs.RunOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunOutput).
+* Added `DashboardTask` and `PowerBiTask` fields for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask).
+* Added `DashboardTask` and `PowerBiTask` fields for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask).
+* Added `DashboardTask` and `PowerBiTask` fields for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task).
+* Added `IncludeFeatures` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest).
+* Added `Models` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs).
+* Added `DatasetDigest`, `DatasetName` and `ModelId` fields for [ml.LogMetric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogMetric).
+* Added `DatasetDigest`, `DatasetName`, `ModelId` and `RunId` fields for [ml.Metric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Metric).
+* Added `ModelInputs` field for [ml.RunInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInputs).
+* Added `ClientApplication` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo).
+* Added `Geography` and `Geometry` enum values for [catalog.ColumnTypeName](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ColumnTypeName).
+* Added `AllocationTimeoutNoHealthyAndWarmedUpClusters`, `DockerContainerCreationException`, `DockerImageTooLargeForInstanceException` and `DockerInvalidOsException` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode).
+* Added `Standard` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget).
+* Added `CanView` enum value for [sql.WarehousePermissionLevel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#WarehousePermissionLevel).
+* [Breaking] Changed `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. Method path has changed.
+* [Breaking] Changed waiter for [CommandExecutionAPI.Create](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Create).
+* [Breaking] Changed waiter for [CommandExecutionAPI.Execute](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Execute).
+* [Breaking] Removed `Error`, `Status` and `TransientStatementId` fields for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse).
+* [Breaking] Removed `Balanced` and `CostOptimized` enum values for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget).
+* [Breaking] Removed [PipelinesAPI.WaitGetPipelineRunning](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelinesAPI.WaitGetPipelineRunning) method.
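As a quick orientation for the additions above, here is a minimal sketch of calling one of the new workspace-level settings services through a WorkspaceClient. The Get/Patch method names come from the generated interfaces in this diff; the field layout of UpdateEnableExportNotebookRequest (AllowMissing, FieldMask, Setting) is an assumption modeled on the other settings update requests in this package, not something this diff confirms.

package main

import (
    "context"
    "fmt"

    "github.com/databricks/databricks-sdk-go"
    "github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
    ctx := context.Background()
    w := databricks.Must(databricks.NewWorkspaceClient())

    // Read the current workspace-level "enable export notebook" setting.
    cur, err := w.Settings.EnableExportNotebook().GetEnableExportNotebook(ctx)
    if err != nil {
        panic(err)
    }
    fmt.Printf("current setting: %+v\n", cur)

    // Patch the setting. AllowMissing/FieldMask/Setting mirror the other
    // settings update requests in this package and are assumptions here.
    _, err = w.Settings.EnableExportNotebook().PatchEnableExportNotebook(ctx,
        settings.UpdateEnableExportNotebookRequest{
            AllowMissing: true,
            FieldMask:    "boolean_val.value", // assumed field path
        })
    if err != nil {
        panic(err)
    }
}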
diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go
index b52b79a32..a14f2697e 100755
--- a/experimental/mocks/mock_workspace_client.go
+++ b/experimental/mocks/mock_workspace_client.go
@@ -166,6 +166,15 @@ func NewMockWorkspaceClient(t interface {
     mockdisableLegacyDbfs := settings.NewMockDisableLegacyDbfsInterface(t)
     mocksettingsAPI.On("DisableLegacyDbfs").Return(mockdisableLegacyDbfs).Maybe()
 
+    mockenableExportNotebook := settings.NewMockEnableExportNotebookInterface(t)
+    mocksettingsAPI.On("EnableExportNotebook").Return(mockenableExportNotebook).Maybe()
+
+    mockenableNotebookTableClipboard := settings.NewMockEnableNotebookTableClipboardInterface(t)
+    mocksettingsAPI.On("EnableNotebookTableClipboard").Return(mockenableNotebookTableClipboard).Maybe()
+
+    mockenableResultsDownloading := settings.NewMockEnableResultsDownloadingInterface(t)
+    mocksettingsAPI.On("EnableResultsDownloading").Return(mockenableResultsDownloading).Maybe()
+
     mockenhancedSecurityMonitoring := settings.NewMockEnhancedSecurityMonitoringInterface(t)
     mocksettingsAPI.On("EnhancedSecurityMonitoring").Return(mockenhancedSecurityMonitoring).Maybe()
 
@@ -231,6 +240,30 @@ func (m *MockWorkspaceClient) GetMockDisableLegacyDbfsAPI() *settings.MockDisabl
     return api
 }
 
+func (m *MockWorkspaceClient) GetMockEnableExportNotebookAPI() *settings.MockEnableExportNotebookInterface {
+    api, ok := m.GetMockSettingsAPI().EnableExportNotebook().(*settings.MockEnableExportNotebookInterface)
+    if !ok {
+        panic(fmt.Sprintf("expected EnableExportNotebook to be *settings.MockEnableExportNotebookInterface, actual was %T", m.GetMockSettingsAPI().EnableExportNotebook()))
+    }
+    return api
+}
+
+func (m *MockWorkspaceClient) GetMockEnableNotebookTableClipboardAPI() *settings.MockEnableNotebookTableClipboardInterface {
+    api, ok := m.GetMockSettingsAPI().EnableNotebookTableClipboard().(*settings.MockEnableNotebookTableClipboardInterface)
+    if !ok {
+        panic(fmt.Sprintf("expected EnableNotebookTableClipboard to be *settings.MockEnableNotebookTableClipboardInterface, actual was %T", m.GetMockSettingsAPI().EnableNotebookTableClipboard()))
+    }
+    return api
+}
+
+func (m *MockWorkspaceClient) GetMockEnableResultsDownloadingAPI() *settings.MockEnableResultsDownloadingInterface {
+    api, ok := m.GetMockSettingsAPI().EnableResultsDownloading().(*settings.MockEnableResultsDownloadingInterface)
+    if !ok {
+        panic(fmt.Sprintf("expected EnableResultsDownloading to be *settings.MockEnableResultsDownloadingInterface, actual was %T", m.GetMockSettingsAPI().EnableResultsDownloading()))
+    }
+    return api
+}
+
 func (m *MockWorkspaceClient) GetMockEnhancedSecurityMonitoringAPI() *settings.MockEnhancedSecurityMonitoringInterface {
     api, ok := m.GetMockSettingsAPI().EnhancedSecurityMonitoring().(*settings.MockEnhancedSecurityMonitoringInterface)
     if !ok {
diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go
index c4ac32fd4..8803a3ca0 100644
--- a/experimental/mocks/service/dashboards/mock_genie_interface.go
+++ b/experimental/mocks/service/dashboards/mock_genie_interface.go
@@ -336,6 +336,128 @@ func (_c *MockGenieInterface_GenerateDownloadFullQueryResult_Call) RunAndReturn(
     return _c
 }
 
+// GetDownloadFullQueryResult provides a mock function with given fields: ctx, request
+func (_m *MockGenieInterface) GetDownloadFullQueryResult(ctx context.Context, request dashboards.GenieGetDownloadFullQueryResultRequest) (*dashboards.GenieGetDownloadFullQueryResultResponse, error) {
+    ret := _m.Called(ctx, request)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetDownloadFullQueryResult")
+    }
+
+    var r0 *dashboards.GenieGetDownloadFullQueryResultResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) (*dashboards.GenieGetDownloadFullQueryResultResponse, error)); ok {
+        return rf(ctx, request)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) *dashboards.GenieGetDownloadFullQueryResultResponse); ok {
+        r0 = rf(ctx, request)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*dashboards.GenieGetDownloadFullQueryResultResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) error); ok {
+        r1 = rf(ctx, request)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockGenieInterface_GetDownloadFullQueryResult_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDownloadFullQueryResult'
+type MockGenieInterface_GetDownloadFullQueryResult_Call struct {
+    *mock.Call
+}
+
+// GetDownloadFullQueryResult is a helper method to define mock.On call
+//   - ctx context.Context
+//   - request dashboards.GenieGetDownloadFullQueryResultRequest
+func (_e *MockGenieInterface_Expecter) GetDownloadFullQueryResult(ctx interface{}, request interface{}) *MockGenieInterface_GetDownloadFullQueryResult_Call {
+    return &MockGenieInterface_GetDownloadFullQueryResult_Call{Call: _e.mock.On("GetDownloadFullQueryResult", ctx, request)}
+}
+
+func (_c *MockGenieInterface_GetDownloadFullQueryResult_Call) Run(run func(ctx context.Context, request dashboards.GenieGetDownloadFullQueryResultRequest)) *MockGenieInterface_GetDownloadFullQueryResult_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(dashboards.GenieGetDownloadFullQueryResultRequest))
+    })
+    return _c
+}
+
+func (_c *MockGenieInterface_GetDownloadFullQueryResult_Call) Return(_a0 *dashboards.GenieGetDownloadFullQueryResultResponse, _a1 error) *MockGenieInterface_GetDownloadFullQueryResult_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockGenieInterface_GetDownloadFullQueryResult_Call) RunAndReturn(run func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) (*dashboards.GenieGetDownloadFullQueryResultResponse, error)) *MockGenieInterface_GetDownloadFullQueryResult_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId provides a mock function with given fields: ctx, spaceId, conversationId, messageId, attachmentId, downloadId
+func (_m *MockGenieInterface) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*dashboards.GenieGetDownloadFullQueryResultResponse, error) {
+    ret := _m.Called(ctx, spaceId, conversationId, messageId, attachmentId, downloadId)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId")
+    }
+
+    var r0 *dashboards.GenieGetDownloadFullQueryResultResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) (*dashboards.GenieGetDownloadFullQueryResultResponse, error)); ok {
+        return rf(ctx, spaceId, conversationId, messageId, attachmentId, downloadId)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) *dashboards.GenieGetDownloadFullQueryResultResponse); ok {
+        r0 = rf(ctx, spaceId, conversationId, messageId, attachmentId, downloadId)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*dashboards.GenieGetDownloadFullQueryResultResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, string) error); ok {
+        r1 = rf(ctx, spaceId, conversationId, messageId, attachmentId, downloadId)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId'
+type MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call struct {
+    *mock.Call
+}
+
+// GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId is a helper method to define mock.On call
+//   - ctx context.Context
+//   - spaceId string
+//   - conversationId string
+//   - messageId string
+//   - attachmentId string
+//   - downloadId string
+func (_e *MockGenieInterface_Expecter) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx interface{}, spaceId interface{}, conversationId interface{}, messageId interface{}, attachmentId interface{}, downloadId interface{}) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call {
+    return &MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call{Call: _e.mock.On("GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId", ctx, spaceId, conversationId, messageId, attachmentId, downloadId)}
+}
+
+func (_c *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call) Run(run func(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string)) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string), args[5].(string))
+    })
+    return _c
+}
+
+func (_c *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call) Return(_a0 *dashboards.GenieGetDownloadFullQueryResultResponse, _a1 error) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call) RunAndReturn(run func(context.Context, string, string, string, string, string) (*dashboards.GenieGetDownloadFullQueryResultResponse, error)) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
 // GetMessage provides a mock function with given fields: ctx, request
 func (_m *MockGenieInterface) GetMessage(ctx context.Context, request dashboards.GenieGetConversationMessageRequest) (*dashboards.GenieMessage, error) {
     ret := _m.Called(ctx, request)
diff --git a/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go
index 4e2cc2b04..51479c6ef 100644
--- a/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go
+++ b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go
@@ -116,6 +116,124 @@ func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboard
     return _c
 }
 
+// GetPublishedDashboardTokenInfo provides a mock function with given fields: ctx, request
+func (_m *MockLakeviewEmbeddedInterface) GetPublishedDashboardTokenInfo(ctx context.Context, request dashboards.GetPublishedDashboardTokenInfoRequest) (*dashboards.GetPublishedDashboardTokenInfoResponse, error) {
+    ret := _m.Called(ctx, request)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetPublishedDashboardTokenInfo")
+    }
+
+    var r0 *dashboards.GetPublishedDashboardTokenInfoResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) (*dashboards.GetPublishedDashboardTokenInfoResponse, error)); ok {
+        return rf(ctx, request)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) *dashboards.GetPublishedDashboardTokenInfoResponse); ok {
+        r0 = rf(ctx, request)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*dashboards.GetPublishedDashboardTokenInfoResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) error); ok {
+        r1 = rf(ctx, request)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPublishedDashboardTokenInfo'
+type MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call struct {
+    *mock.Call
+}
+
+// GetPublishedDashboardTokenInfo is a helper method to define mock.On call
+//   - ctx context.Context
+//   - request dashboards.GetPublishedDashboardTokenInfoRequest
+func (_e *MockLakeviewEmbeddedInterface_Expecter) GetPublishedDashboardTokenInfo(ctx interface{}, request interface{}) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call {
+    return &MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call{Call: _e.mock.On("GetPublishedDashboardTokenInfo", ctx, request)}
+}
+
+func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call) Run(run func(ctx context.Context, request dashboards.GetPublishedDashboardTokenInfoRequest)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(dashboards.GetPublishedDashboardTokenInfoRequest))
+    })
+    return _c
+}
+
+func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call) Return(_a0 *dashboards.GetPublishedDashboardTokenInfoResponse, _a1 error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call) RunAndReturn(run func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) (*dashboards.GetPublishedDashboardTokenInfoResponse, error)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// GetPublishedDashboardTokenInfoByDashboardId provides a mock function with given fields: ctx, dashboardId
+func (_m *MockLakeviewEmbeddedInterface) GetPublishedDashboardTokenInfoByDashboardId(ctx context.Context, dashboardId string) (*dashboards.GetPublishedDashboardTokenInfoResponse, error) {
+    ret := _m.Called(ctx, dashboardId)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetPublishedDashboardTokenInfoByDashboardId")
+    }
+
+    var r0 *dashboards.GetPublishedDashboardTokenInfoResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, string) (*dashboards.GetPublishedDashboardTokenInfoResponse, error)); ok {
+        return rf(ctx, dashboardId)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, string) *dashboards.GetPublishedDashboardTokenInfoResponse); ok {
+        r0 = rf(ctx, dashboardId)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*dashboards.GetPublishedDashboardTokenInfoResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+        r1 = rf(ctx, dashboardId)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPublishedDashboardTokenInfoByDashboardId'
+type MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call struct {
+    *mock.Call
+}
+
+// GetPublishedDashboardTokenInfoByDashboardId is a helper method to define mock.On call
+//   - ctx context.Context
+//   - dashboardId string
+func (_e *MockLakeviewEmbeddedInterface_Expecter) GetPublishedDashboardTokenInfoByDashboardId(ctx interface{}, dashboardId interface{}) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call {
+    return &MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call{Call: _e.mock.On("GetPublishedDashboardTokenInfoByDashboardId", ctx, dashboardId)}
+}
+
+func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call) Run(run func(ctx context.Context, dashboardId string)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(string))
+    })
+    return _c
+}
+
+func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call) Return(_a0 *dashboards.GetPublishedDashboardTokenInfoResponse, _a1 error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call) RunAndReturn(run func(context.Context, string) (*dashboards.GetPublishedDashboardTokenInfoResponse, error)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
 // NewMockLakeviewEmbeddedInterface creates a new instance of MockLakeviewEmbeddedInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
 // The first argument is typically a *testing.T value.
 func NewMockLakeviewEmbeddedInterface(t interface {
diff --git a/experimental/mocks/service/ml/mock_experiments_interface.go b/experimental/mocks/service/ml/mock_experiments_interface.go
index e13c8f47f..7a7226309 100644
--- a/experimental/mocks/service/ml/mock_experiments_interface.go
+++ b/experimental/mocks/service/ml/mock_experiments_interface.go
@@ -401,6 +401,242 @@ func (_c *MockExperimentsInterface_GetByName_Call) RunAndReturn(run func(context
     return _c
 }
 
+// GetCredentialsForTraceDataDownload provides a mock function with given fields: ctx, request
+func (_m *MockExperimentsInterface) GetCredentialsForTraceDataDownload(ctx context.Context, request ml.GetCredentialsForTraceDataDownloadRequest) (*ml.GetCredentialsForTraceDataDownloadResponse, error) {
+    ret := _m.Called(ctx, request)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetCredentialsForTraceDataDownload")
+    }
+
+    var r0 *ml.GetCredentialsForTraceDataDownloadResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) (*ml.GetCredentialsForTraceDataDownloadResponse, error)); ok {
+        return rf(ctx, request)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) *ml.GetCredentialsForTraceDataDownloadResponse); ok {
+        r0 = rf(ctx, request)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataDownloadResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) error); ok {
+        r1 = rf(ctx, request)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataDownload'
+type MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call struct {
+    *mock.Call
+}
+
+// GetCredentialsForTraceDataDownload is a helper method to define mock.On call
+//   - ctx context.Context
+//   - request ml.GetCredentialsForTraceDataDownloadRequest
+func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataDownload(ctx interface{}, request interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call {
+    return &MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call{Call: _e.mock.On("GetCredentialsForTraceDataDownload", ctx, request)}
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call) Run(run func(ctx context.Context, request ml.GetCredentialsForTraceDataDownloadRequest)) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(ml.GetCredentialsForTraceDataDownloadRequest))
+    })
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call) Return(_a0 *ml.GetCredentialsForTraceDataDownloadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call) RunAndReturn(run func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) (*ml.GetCredentialsForTraceDataDownloadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// GetCredentialsForTraceDataDownloadByRequestId provides a mock function with given fields: ctx, requestId
+func (_m *MockExperimentsInterface) GetCredentialsForTraceDataDownloadByRequestId(ctx context.Context, requestId string) (*ml.GetCredentialsForTraceDataDownloadResponse, error) {
+    ret := _m.Called(ctx, requestId)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetCredentialsForTraceDataDownloadByRequestId")
+    }
+
+    var r0 *ml.GetCredentialsForTraceDataDownloadResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, string) (*ml.GetCredentialsForTraceDataDownloadResponse, error)); ok {
+        return rf(ctx, requestId)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, string) *ml.GetCredentialsForTraceDataDownloadResponse); ok {
+        r0 = rf(ctx, requestId)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataDownloadResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+        r1 = rf(ctx, requestId)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataDownloadByRequestId'
+type MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call struct {
+    *mock.Call
+}
+
+// GetCredentialsForTraceDataDownloadByRequestId is a helper method to define mock.On call
+//   - ctx context.Context
+//   - requestId string
+func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataDownloadByRequestId(ctx interface{}, requestId interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call {
+    return &MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call{Call: _e.mock.On("GetCredentialsForTraceDataDownloadByRequestId", ctx, requestId)}
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call) Run(run func(ctx context.Context, requestId string)) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(string))
+    })
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call) Return(_a0 *ml.GetCredentialsForTraceDataDownloadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call) RunAndReturn(run func(context.Context, string) (*ml.GetCredentialsForTraceDataDownloadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// GetCredentialsForTraceDataUpload provides a mock function with given fields: ctx, request
+func (_m *MockExperimentsInterface) GetCredentialsForTraceDataUpload(ctx context.Context, request ml.GetCredentialsForTraceDataUploadRequest) (*ml.GetCredentialsForTraceDataUploadResponse, error) {
+    ret := _m.Called(ctx, request)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetCredentialsForTraceDataUpload")
+    }
+
+    var r0 *ml.GetCredentialsForTraceDataUploadResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) (*ml.GetCredentialsForTraceDataUploadResponse, error)); ok {
+        return rf(ctx, request)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) *ml.GetCredentialsForTraceDataUploadResponse); ok {
+        r0 = rf(ctx, request)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataUploadResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) error); ok {
+        r1 = rf(ctx, request)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataUpload'
+type MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call struct {
+    *mock.Call
+}
+
+// GetCredentialsForTraceDataUpload is a helper method to define mock.On call
+//   - ctx context.Context
+//   - request ml.GetCredentialsForTraceDataUploadRequest
+func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataUpload(ctx interface{}, request interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call {
+    return &MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call{Call: _e.mock.On("GetCredentialsForTraceDataUpload", ctx, request)}
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call) Run(run func(ctx context.Context, request ml.GetCredentialsForTraceDataUploadRequest)) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(ml.GetCredentialsForTraceDataUploadRequest))
+    })
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call) Return(_a0 *ml.GetCredentialsForTraceDataUploadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call) RunAndReturn(run func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) (*ml.GetCredentialsForTraceDataUploadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// GetCredentialsForTraceDataUploadByRequestId provides a mock function with given fields: ctx, requestId
+func (_m *MockExperimentsInterface) GetCredentialsForTraceDataUploadByRequestId(ctx context.Context, requestId string) (*ml.GetCredentialsForTraceDataUploadResponse, error) {
+    ret := _m.Called(ctx, requestId)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetCredentialsForTraceDataUploadByRequestId")
+    }
+
+    var r0 *ml.GetCredentialsForTraceDataUploadResponse
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, string) (*ml.GetCredentialsForTraceDataUploadResponse, error)); ok {
+        return rf(ctx, requestId)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, string) *ml.GetCredentialsForTraceDataUploadResponse); ok {
+        r0 = rf(ctx, requestId)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataUploadResponse)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, string) error); ok {
+        r1 = rf(ctx, requestId)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataUploadByRequestId'
+type MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call struct {
+    *mock.Call
+}
+
+// GetCredentialsForTraceDataUploadByRequestId is a helper method to define mock.On call
+//   - ctx context.Context
+//   - requestId string
+func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataUploadByRequestId(ctx interface{}, requestId interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call {
+    return &MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call{Call: _e.mock.On("GetCredentialsForTraceDataUploadByRequestId", ctx, requestId)}
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call) Run(run func(ctx context.Context, requestId string)) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(string))
+    })
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call) Return(_a0 *ml.GetCredentialsForTraceDataUploadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call) RunAndReturn(run func(context.Context, string) (*ml.GetCredentialsForTraceDataUploadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
 // GetExperiment provides a mock function with given fields: ctx, request
 func (_m *MockExperimentsInterface) GetExperiment(ctx context.Context, request ml.GetExperimentRequest) (*ml.GetExperimentResponse, error) {
     ret := _m.Called(ctx, request)
diff --git a/experimental/mocks/service/pipelines/mock_pipelines_interface.go b/experimental/mocks/service/pipelines/mock_pipelines_interface.go
index db41cbca0..0ac64dd7a 100644
--- a/experimental/mocks/service/pipelines/mock_pipelines_interface.go
+++ b/experimental/mocks/service/pipelines/mock_pipelines_interface.go
@@ -1583,67 +1583,6 @@ func (_c *MockPipelinesInterface_WaitGetPipelineIdle_Call) RunAndReturn(run func
     return _c
 }
 
-// WaitGetPipelineRunning provides a mock function with given fields: ctx, pipelineId, timeout, callback
-func (_m *MockPipelinesInterface) WaitGetPipelineRunning(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) {
-    ret := _m.Called(ctx, pipelineId, timeout, callback)
-
-    if len(ret) == 0 {
-        panic("no return value specified for WaitGetPipelineRunning")
-    }
-
-    var r0 *pipelines.GetPipelineResponse
-    var r1 error
-    if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error)); ok {
-        return rf(ctx, pipelineId, timeout, callback)
-    }
-    if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) *pipelines.GetPipelineResponse); ok {
-        r0 = rf(ctx, pipelineId, timeout, callback)
-    } else {
-        if ret.Get(0) != nil {
-            r0 = ret.Get(0).(*pipelines.GetPipelineResponse)
-        }
-    }
-
-    if rf, ok := ret.Get(1).(func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) error); ok {
-        r1 = rf(ctx, pipelineId, timeout, callback)
-    } else {
-        r1 = ret.Error(1)
-    }
-
-    return r0, r1
-}
-
-// MockPipelinesInterface_WaitGetPipelineRunning_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitGetPipelineRunning'
-type MockPipelinesInterface_WaitGetPipelineRunning_Call struct {
-    *mock.Call
-}
-
-// WaitGetPipelineRunning is a helper method to define mock.On call
-//   - ctx context.Context
-//   - pipelineId string
-//   - timeout time.Duration
-//   - callback func(*pipelines.GetPipelineResponse)
-func (_e *MockPipelinesInterface_Expecter) WaitGetPipelineRunning(ctx interface{}, pipelineId interface{}, timeout interface{}, callback interface{}) *MockPipelinesInterface_WaitGetPipelineRunning_Call {
-    return &MockPipelinesInterface_WaitGetPipelineRunning_Call{Call: _e.mock.On("WaitGetPipelineRunning", ctx, pipelineId, timeout, callback)}
-}
-
-func (_c *MockPipelinesInterface_WaitGetPipelineRunning_Call) Run(run func(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*pipelines.GetPipelineResponse))) *MockPipelinesInterface_WaitGetPipelineRunning_Call {
-    _c.Call.Run(func(args mock.Arguments) {
-        run(args[0].(context.Context), args[1].(string), args[2].(time.Duration), args[3].(func(*pipelines.GetPipelineResponse)))
-    })
-    return _c
-}
-
-func (_c *MockPipelinesInterface_WaitGetPipelineRunning_Call) Return(_a0 *pipelines.GetPipelineResponse, _a1 error) *MockPipelinesInterface_WaitGetPipelineRunning_Call {
-    _c.Call.Return(_a0, _a1)
-    return _c
-}
-
-func (_c *MockPipelinesInterface_WaitGetPipelineRunning_Call) RunAndReturn(run func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error)) *MockPipelinesInterface_WaitGetPipelineRunning_Call {
-    _c.Call.Return(run)
-    return _c
-}
-
 // NewMockPipelinesInterface creates a new instance of MockPipelinesInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
 // The first argument is typically a *testing.T value.
 func NewMockPipelinesInterface(t interface {
diff --git a/experimental/mocks/service/settings/mock_enable_export_notebook_interface.go b/experimental/mocks/service/settings/mock_enable_export_notebook_interface.go
new file mode 100644
index 000000000..29cabb26b
--- /dev/null
+++ b/experimental/mocks/service/settings/mock_enable_export_notebook_interface.go
@@ -0,0 +1,154 @@
+// Code generated by mockery v2.53.2. DO NOT EDIT.
+
+package settings
+
+import (
+    context "context"
+
+    settings "github.com/databricks/databricks-sdk-go/service/settings"
+    mock "github.com/stretchr/testify/mock"
+)
+
+// MockEnableExportNotebookInterface is an autogenerated mock type for the EnableExportNotebookInterface type
+type MockEnableExportNotebookInterface struct {
+    mock.Mock
+}
+
+type MockEnableExportNotebookInterface_Expecter struct {
+    mock *mock.Mock
+}
+
+func (_m *MockEnableExportNotebookInterface) EXPECT() *MockEnableExportNotebookInterface_Expecter {
+    return &MockEnableExportNotebookInterface_Expecter{mock: &_m.Mock}
+}
+
+// GetEnableExportNotebook provides a mock function with given fields: ctx
+func (_m *MockEnableExportNotebookInterface) GetEnableExportNotebook(ctx context.Context) (*settings.EnableExportNotebook, error) {
+    ret := _m.Called(ctx)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetEnableExportNotebook")
+    }
+
+    var r0 *settings.EnableExportNotebook
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context) (*settings.EnableExportNotebook, error)); ok {
+        return rf(ctx)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context) *settings.EnableExportNotebook); ok {
+        r0 = rf(ctx)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*settings.EnableExportNotebook)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+        r1 = rf(ctx)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockEnableExportNotebookInterface_GetEnableExportNotebook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnableExportNotebook'
+type MockEnableExportNotebookInterface_GetEnableExportNotebook_Call struct {
+    *mock.Call
+}
+
+// GetEnableExportNotebook is a helper method to define mock.On call
+//   - ctx context.Context
+func (_e *MockEnableExportNotebookInterface_Expecter) GetEnableExportNotebook(ctx interface{}) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call {
+    return &MockEnableExportNotebookInterface_GetEnableExportNotebook_Call{Call: _e.mock.On("GetEnableExportNotebook", ctx)}
+}
+
+func (_c *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call) Run(run func(ctx context.Context)) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context))
+    })
+    return _c
+}
+
+func (_c *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call) Return(_a0 *settings.EnableExportNotebook, _a1 error) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call) RunAndReturn(run func(context.Context) (*settings.EnableExportNotebook, error)) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// PatchEnableExportNotebook provides a mock function with given fields: ctx, request
+func (_m *MockEnableExportNotebookInterface) PatchEnableExportNotebook(ctx context.Context, request settings.UpdateEnableExportNotebookRequest) (*settings.EnableExportNotebook, error) {
+    ret := _m.Called(ctx, request)
+
+    if len(ret) == 0 {
+        panic("no return value specified for PatchEnableExportNotebook")
+    }
+
+    var r0 *settings.EnableExportNotebook
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableExportNotebookRequest) (*settings.EnableExportNotebook, error)); ok {
+        return rf(ctx, request)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableExportNotebookRequest) *settings.EnableExportNotebook); ok {
+        r0 = rf(ctx, request)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*settings.EnableExportNotebook)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateEnableExportNotebookRequest) error); ok {
+        r1 = rf(ctx, request)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchEnableExportNotebook'
+type MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call struct {
+    *mock.Call
+}
+
+// PatchEnableExportNotebook is a helper method to define mock.On call
+//   - ctx context.Context
+//   - request settings.UpdateEnableExportNotebookRequest
+func (_e *MockEnableExportNotebookInterface_Expecter) PatchEnableExportNotebook(ctx interface{}, request interface{}) *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call {
+    return &MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call{Call: _e.mock.On("PatchEnableExportNotebook", ctx, request)}
+}
+
+func (_c *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call) Run(run func(ctx context.Context, request settings.UpdateEnableExportNotebookRequest)) *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(settings.UpdateEnableExportNotebookRequest))
+    })
+    return _c
+}
+
+func (_c *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call) Return(_a0 *settings.EnableExportNotebook, _a1 error) *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call) RunAndReturn(run func(context.Context, settings.UpdateEnableExportNotebookRequest) (*settings.EnableExportNotebook, error)) *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// NewMockEnableExportNotebookInterface creates a new instance of MockEnableExportNotebookInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockEnableExportNotebookInterface(t interface {
+    mock.TestingT
+    Cleanup(func())
+}) *MockEnableExportNotebookInterface {
+    mock := &MockEnableExportNotebookInterface{}
+    mock.Mock.Test(t)
+
+    t.Cleanup(func() { mock.AssertExpectations(t) })
+
+    return mock
+}
diff --git a/experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go b/experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go
new file mode 100644
index 000000000..9d4b917e2
--- /dev/null
+++ b/experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go
@@ -0,0 +1,154 @@
+// Code generated by mockery v2.53.2. DO NOT EDIT.
+
+package settings
+
+import (
+    context "context"
+
+    settings "github.com/databricks/databricks-sdk-go/service/settings"
+    mock "github.com/stretchr/testify/mock"
+)
+
+// MockEnableNotebookTableClipboardInterface is an autogenerated mock type for the EnableNotebookTableClipboardInterface type
+type MockEnableNotebookTableClipboardInterface struct {
+    mock.Mock
+}
+
+type MockEnableNotebookTableClipboardInterface_Expecter struct {
+    mock *mock.Mock
+}
+
+func (_m *MockEnableNotebookTableClipboardInterface) EXPECT() *MockEnableNotebookTableClipboardInterface_Expecter {
+    return &MockEnableNotebookTableClipboardInterface_Expecter{mock: &_m.Mock}
+}
+
+// GetEnableNotebookTableClipboard provides a mock function with given fields: ctx
+func (_m *MockEnableNotebookTableClipboardInterface) GetEnableNotebookTableClipboard(ctx context.Context) (*settings.EnableNotebookTableClipboard, error) {
+    ret := _m.Called(ctx)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetEnableNotebookTableClipboard")
+    }
+
+    var r0 *settings.EnableNotebookTableClipboard
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context) (*settings.EnableNotebookTableClipboard, error)); ok {
+        return rf(ctx)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context) *settings.EnableNotebookTableClipboard); ok {
+        r0 = rf(ctx)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*settings.EnableNotebookTableClipboard)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+        r1 = rf(ctx)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnableNotebookTableClipboard'
+type MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call struct {
+    *mock.Call
+}
+
+// GetEnableNotebookTableClipboard is a helper method to define mock.On call
+//   - ctx context.Context
+func (_e *MockEnableNotebookTableClipboardInterface_Expecter) GetEnableNotebookTableClipboard(ctx interface{}) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call {
+    return &MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call{Call: _e.mock.On("GetEnableNotebookTableClipboard", ctx)}
+}
+
+func (_c *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call) Run(run func(ctx context.Context)) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context))
+    })
+    return _c
+}
+
+func (_c *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call) Return(_a0 *settings.EnableNotebookTableClipboard, _a1 error) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call) RunAndReturn(run func(context.Context) (*settings.EnableNotebookTableClipboard, error)) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// PatchEnableNotebookTableClipboard provides a mock function with given fields: ctx, request
+func (_m *MockEnableNotebookTableClipboardInterface) PatchEnableNotebookTableClipboard(ctx context.Context, request settings.UpdateEnableNotebookTableClipboardRequest) (*settings.EnableNotebookTableClipboard, error) {
+    ret := _m.Called(ctx, request)
+
+    if len(ret) == 0 {
+        panic("no return value specified for PatchEnableNotebookTableClipboard")
+    }
+
+    var r0 *settings.EnableNotebookTableClipboard
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableNotebookTableClipboardRequest) (*settings.EnableNotebookTableClipboard, error)); ok {
+        return rf(ctx, request)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableNotebookTableClipboardRequest) *settings.EnableNotebookTableClipboard); ok {
+        r0 = rf(ctx, request)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*settings.EnableNotebookTableClipboard)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateEnableNotebookTableClipboardRequest) error); ok {
+        r1 = rf(ctx, request)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchEnableNotebookTableClipboard'
+type MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call struct {
+    *mock.Call
+}
+
+// PatchEnableNotebookTableClipboard is a helper method to define mock.On call
+//   - ctx context.Context
+//   - request settings.UpdateEnableNotebookTableClipboardRequest
+func (_e *MockEnableNotebookTableClipboardInterface_Expecter) PatchEnableNotebookTableClipboard(ctx interface{}, request interface{}) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call {
+    return &MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call{Call: _e.mock.On("PatchEnableNotebookTableClipboard", ctx, request)}
+}
+
+func (_c *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call) Run(run func(ctx context.Context, request settings.UpdateEnableNotebookTableClipboardRequest)) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(settings.UpdateEnableNotebookTableClipboardRequest))
+    })
+    return _c
+}
+
+func (_c *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call) Return(_a0 *settings.EnableNotebookTableClipboard, _a1 error) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call) RunAndReturn(run func(context.Context, settings.UpdateEnableNotebookTableClipboardRequest) (*settings.EnableNotebookTableClipboard, error)) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// NewMockEnableNotebookTableClipboardInterface creates a new instance of MockEnableNotebookTableClipboardInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockEnableNotebookTableClipboardInterface(t interface {
+    mock.TestingT
+    Cleanup(func())
+}) *MockEnableNotebookTableClipboardInterface {
+    mock := &MockEnableNotebookTableClipboardInterface{}
+    mock.Mock.Test(t)
+
+    t.Cleanup(func() { mock.AssertExpectations(t) })
+
+    return mock
+}
diff --git a/experimental/mocks/service/settings/mock_enable_results_downloading_interface.go b/experimental/mocks/service/settings/mock_enable_results_downloading_interface.go
new file mode 100644
index 000000000..e25b79736
--- /dev/null
+++ b/experimental/mocks/service/settings/mock_enable_results_downloading_interface.go
@@ -0,0 +1,154 @@
+// Code generated by mockery v2.53.2. DO NOT EDIT.
+
+package settings
+
+import (
+    context "context"
+
+    settings "github.com/databricks/databricks-sdk-go/service/settings"
+    mock "github.com/stretchr/testify/mock"
+)
+
+// MockEnableResultsDownloadingInterface is an autogenerated mock type for the EnableResultsDownloadingInterface type
+type MockEnableResultsDownloadingInterface struct {
+    mock.Mock
+}
+
+type MockEnableResultsDownloadingInterface_Expecter struct {
+    mock *mock.Mock
+}
+
+func (_m *MockEnableResultsDownloadingInterface) EXPECT() *MockEnableResultsDownloadingInterface_Expecter {
+    return &MockEnableResultsDownloadingInterface_Expecter{mock: &_m.Mock}
+}
+
+// GetEnableResultsDownloading provides a mock function with given fields: ctx
+func (_m *MockEnableResultsDownloadingInterface) GetEnableResultsDownloading(ctx context.Context) (*settings.EnableResultsDownloading, error) {
+    ret := _m.Called(ctx)
+
+    if len(ret) == 0 {
+        panic("no return value specified for GetEnableResultsDownloading")
+    }
+
+    var r0 *settings.EnableResultsDownloading
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context) (*settings.EnableResultsDownloading, error)); ok {
+        return rf(ctx)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context) *settings.EnableResultsDownloading); ok {
+        r0 = rf(ctx)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*settings.EnableResultsDownloading)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context) error); ok {
+        r1 = rf(ctx)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnableResultsDownloading'
+type MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call struct {
+    *mock.Call
+}
+
+// GetEnableResultsDownloading is a helper method to define mock.On call
+//   - ctx context.Context
+func (_e *MockEnableResultsDownloadingInterface_Expecter) GetEnableResultsDownloading(ctx interface{}) *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call {
+    return &MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call{Call: _e.mock.On("GetEnableResultsDownloading", ctx)}
+}
+
+func (_c *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call) Run(run func(ctx context.Context)) *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context))
+    })
+    return _c
+}
+
+func (_c *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call) Return(_a0 *settings.EnableResultsDownloading, _a1 error) *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call) RunAndReturn(run func(context.Context) (*settings.EnableResultsDownloading, error)) *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// PatchEnableResultsDownloading provides a mock function with given fields: ctx, request
+func (_m *MockEnableResultsDownloadingInterface) PatchEnableResultsDownloading(ctx context.Context, request settings.UpdateEnableResultsDownloadingRequest) (*settings.EnableResultsDownloading, error) {
+    ret := _m.Called(ctx, request)
+
+    if len(ret) == 0 {
+        panic("no return value specified for PatchEnableResultsDownloading")
+    }
+
+    var r0 *settings.EnableResultsDownloading
+    var r1 error
+    if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableResultsDownloadingRequest) (*settings.EnableResultsDownloading, error)); ok {
+        return rf(ctx, request)
+    }
+    if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableResultsDownloadingRequest) *settings.EnableResultsDownloading); ok {
+        r0 = rf(ctx, request)
+    } else {
+        if ret.Get(0) != nil {
+            r0 = ret.Get(0).(*settings.EnableResultsDownloading)
+        }
+    }
+
+    if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateEnableResultsDownloadingRequest) error); ok {
+        r1 = rf(ctx, request)
+    } else {
+        r1 = ret.Error(1)
+    }
+
+    return r0, r1
+}
+
+// MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchEnableResultsDownloading'
+type MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call struct {
+    *mock.Call
+}
+
+// PatchEnableResultsDownloading is a helper method to define mock.On call
+//   - ctx context.Context
+//   - request settings.UpdateEnableResultsDownloadingRequest
+func (_e *MockEnableResultsDownloadingInterface_Expecter) PatchEnableResultsDownloading(ctx interface{}, request interface{}) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call {
+    return &MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call{Call: _e.mock.On("PatchEnableResultsDownloading", ctx, request)}
+}
+
+func (_c *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call) Run(run func(ctx context.Context, request settings.UpdateEnableResultsDownloadingRequest)) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call {
+    _c.Call.Run(func(args mock.Arguments) {
+        run(args[0].(context.Context), args[1].(settings.UpdateEnableResultsDownloadingRequest))
+    })
+    return _c
+}
+
+func (_c *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call) Return(_a0 *settings.EnableResultsDownloading, _a1 error) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call {
+    _c.Call.Return(_a0, _a1)
+    return _c
+}
+
+func (_c *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call) RunAndReturn(run func(context.Context, settings.UpdateEnableResultsDownloadingRequest) (*settings.EnableResultsDownloading, error)) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call {
+    _c.Call.Return(run)
+    return _c
+}
+
+// NewMockEnableResultsDownloadingInterface creates a new instance of MockEnableResultsDownloadingInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations.
+// The first argument is typically a *testing.T value.
+func NewMockEnableResultsDownloadingInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockEnableResultsDownloadingInterface { + mock := &MockEnableResultsDownloadingInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/settings/mock_settings_interface.go b/experimental/mocks/service/settings/mock_settings_interface.go index 73f937d27..01dd451ca 100644 --- a/experimental/mocks/service/settings/mock_settings_interface.go +++ b/experimental/mocks/service/settings/mock_settings_interface.go @@ -349,6 +349,147 @@ func (_c *MockSettingsInterface_DisableLegacyDbfs_Call) RunAndReturn(run func() return _c } +// EnableExportNotebook provides a mock function with no fields +func (_m *MockSettingsInterface) EnableExportNotebook() settings.EnableExportNotebookInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnableExportNotebook") + } + + var r0 settings.EnableExportNotebookInterface + if rf, ok := ret.Get(0).(func() settings.EnableExportNotebookInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.EnableExportNotebookInterface) + } + } + + return r0 +} + +// MockSettingsInterface_EnableExportNotebook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableExportNotebook' +type MockSettingsInterface_EnableExportNotebook_Call struct { + *mock.Call +} + +// EnableExportNotebook is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) EnableExportNotebook() *MockSettingsInterface_EnableExportNotebook_Call { + return &MockSettingsInterface_EnableExportNotebook_Call{Call: _e.mock.On("EnableExportNotebook")} +} + +func (_c *MockSettingsInterface_EnableExportNotebook_Call) Run(run func()) *MockSettingsInterface_EnableExportNotebook_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_EnableExportNotebook_Call) Return(_a0 settings.EnableExportNotebookInterface) *MockSettingsInterface_EnableExportNotebook_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_EnableExportNotebook_Call) RunAndReturn(run func() settings.EnableExportNotebookInterface) *MockSettingsInterface_EnableExportNotebook_Call { + _c.Call.Return(run) + return _c +} + +// EnableNotebookTableClipboard provides a mock function with no fields +func (_m *MockSettingsInterface) EnableNotebookTableClipboard() settings.EnableNotebookTableClipboardInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnableNotebookTableClipboard") + } + + var r0 settings.EnableNotebookTableClipboardInterface + if rf, ok := ret.Get(0).(func() settings.EnableNotebookTableClipboardInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.EnableNotebookTableClipboardInterface) + } + } + + return r0 +} + +// MockSettingsInterface_EnableNotebookTableClipboard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableNotebookTableClipboard' +type MockSettingsInterface_EnableNotebookTableClipboard_Call struct { + *mock.Call +} + +// EnableNotebookTableClipboard is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) EnableNotebookTableClipboard() *MockSettingsInterface_EnableNotebookTableClipboard_Call { + return &MockSettingsInterface_EnableNotebookTableClipboard_Call{Call: 
_e.mock.On("EnableNotebookTableClipboard")} +} + +func (_c *MockSettingsInterface_EnableNotebookTableClipboard_Call) Run(run func()) *MockSettingsInterface_EnableNotebookTableClipboard_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_EnableNotebookTableClipboard_Call) Return(_a0 settings.EnableNotebookTableClipboardInterface) *MockSettingsInterface_EnableNotebookTableClipboard_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_EnableNotebookTableClipboard_Call) RunAndReturn(run func() settings.EnableNotebookTableClipboardInterface) *MockSettingsInterface_EnableNotebookTableClipboard_Call { + _c.Call.Return(run) + return _c +} + +// EnableResultsDownloading provides a mock function with no fields +func (_m *MockSettingsInterface) EnableResultsDownloading() settings.EnableResultsDownloadingInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnableResultsDownloading") + } + + var r0 settings.EnableResultsDownloadingInterface + if rf, ok := ret.Get(0).(func() settings.EnableResultsDownloadingInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.EnableResultsDownloadingInterface) + } + } + + return r0 +} + +// MockSettingsInterface_EnableResultsDownloading_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableResultsDownloading' +type MockSettingsInterface_EnableResultsDownloading_Call struct { + *mock.Call +} + +// EnableResultsDownloading is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) EnableResultsDownloading() *MockSettingsInterface_EnableResultsDownloading_Call { + return &MockSettingsInterface_EnableResultsDownloading_Call{Call: _e.mock.On("EnableResultsDownloading")} +} + +func (_c *MockSettingsInterface_EnableResultsDownloading_Call) Run(run func()) *MockSettingsInterface_EnableResultsDownloading_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_EnableResultsDownloading_Call) Return(_a0 settings.EnableResultsDownloadingInterface) *MockSettingsInterface_EnableResultsDownloading_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_EnableResultsDownloading_Call) RunAndReturn(run func() settings.EnableResultsDownloadingInterface) *MockSettingsInterface_EnableResultsDownloading_Call { + _c.Call.Return(run) + return _c +} + // EnhancedSecurityMonitoring provides a mock function with no fields func (_m *MockSettingsInterface) EnhancedSecurityMonitoring() settings.EnhancedSecurityMonitoringInterface { ret := _m.Called() diff --git a/service/billing/model.go b/service/billing/model.go index d9199a2e2..3de61625b 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -253,6 +253,10 @@ type BudgetConfigurationFilterWorkspaceIdClause struct { // Contains the BudgetPolicy details. type BudgetPolicy struct { + // List of workspaces that this budget policy will be exclusively bound to. + // An empty binding implies that this budget policy is open to any workspace + // in the account. + BindingWorkspaceIds []int64 `json:"binding_workspace_ids,omitempty"` // A list of tags defined by the customer. At most 20 entries are allowed // per policy. 
CustomTags []compute.CustomPolicyTag `json:"custom_tags,omitempty"` diff --git a/service/catalog/model.go b/service/catalog/model.go index 2658e51f2..9708c90cd 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -549,6 +549,10 @@ const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` const ColumnTypeNameFloat ColumnTypeName = `FLOAT` +const ColumnTypeNameGeography ColumnTypeName = `GEOGRAPHY` + +const ColumnTypeNameGeometry ColumnTypeName = `GEOMETRY` + const ColumnTypeNameInt ColumnTypeName = `INT` const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` @@ -583,11 +587,11 @@ func (f *ColumnTypeName) String() string { // Set raw string value and validate it against allowed values func (f *ColumnTypeName) Set(v string) error { switch v { - case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`: + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `GEOGRAPHY`, `GEOMETRY`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`: *f = ColumnTypeName(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE", "VARIANT"`, v) + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "GEOGRAPHY", "GEOMETRY", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE", "VARIANT"`, v) } } @@ -1217,7 +1221,12 @@ type CreateVolumeRequestContent struct { SchemaName string `json:"schema_name"` // The storage location on the cloud StorageLocation string `json:"storage_location,omitempty"` - + // The type of the volume. An external volume is located in the specified + // external location. A managed volume is located in the default location + // which is specified by the parent schema, or the parent catalog, or the + // Metastore. [Learn more] + // + // [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external VolumeType VolumeType `json:"volume_type"` ForceSendFields []string `json:"-" url:"-"` @@ -6087,7 +6096,12 @@ type VolumeInfo struct { UpdatedBy string `json:"updated_by,omitempty"` // The unique identifier of the volume VolumeId string `json:"volume_id,omitempty"` - + // The type of the volume. An external volume is located in the specified + // external location. A managed volume is located in the default location + // which is specified by the parent schema, or the parent catalog, or the + // Metastore. [Learn more] + // + // [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external VolumeType VolumeType `json:"volume_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -6101,6 +6115,12 @@ func (s VolumeInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The type of the volume. An external volume is located in the specified +// external location. A managed volume is located in the default location which +// is specified by the parent schema, or the parent catalog, or the Metastore. 
+// [Learn more] +// +// [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external type VolumeType string const VolumeTypeExternal VolumeType = `EXTERNAL` diff --git a/service/compute/model.go b/service/compute/model.go index 37bb90553..c34ffb688 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2759,6 +2759,9 @@ type Environment struct { // project path>(WSFS or Volumes in Databricks), E.g. // dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] Dependencies []string `json:"dependencies,omitempty"` + // List of jar dependencies; each entry should be a string representing a + // volume path. For example: `/Volumes/path/to/test.jar`. + JarDependencies []string `json:"jar_dependencies,omitempty"` } type EventDetails struct { @@ -4601,6 +4604,8 @@ func (s LogSyncStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type MapAny map[string]any + type MavenLibrary struct { // Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". Coordinates string `json:"coordinates"` @@ -5215,6 +5220,8 @@ const TerminationReasonCodeAllocationTimeout TerminationReasonCode = `ALLOCATION const TerminationReasonCodeAllocationTimeoutNodeDaemonNotReady TerminationReasonCode = `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY` +const TerminationReasonCodeAllocationTimeoutNoHealthyAndWarmedUpClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS` + const TerminationReasonCodeAllocationTimeoutNoHealthyClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS` const TerminationReasonCodeAllocationTimeoutNoMatchedClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS` @@ -5319,8 +5326,14 @@ const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_ const TerminationReasonCodeDisasterRecoveryReplication TerminationReasonCode = `DISASTER_RECOVERY_REPLICATION` +const TerminationReasonCodeDockerContainerCreationException TerminationReasonCode = `DOCKER_CONTAINER_CREATION_EXCEPTION` + const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` +const TerminationReasonCodeDockerImageTooLargeForInstanceException TerminationReasonCode = `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION` + +const TerminationReasonCodeDockerInvalidOsException TerminationReasonCode = `DOCKER_INVALID_OS_EXCEPTION` + const TerminationReasonCodeDriverEviction TerminationReasonCode = `DRIVER_EVICTION` const TerminationReasonCodeDriverLaunchTimeout TerminationReasonCode = `DRIVER_LAUNCH_TIMEOUT` @@ -5531,11 +5544,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`,
`AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, 
`STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, 
`IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", 
"CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", 
"ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", 
"NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } diff --git a/service/dashboards/api.go b/service/dashboards/api.go index b12d265ef..3e6b1ccbf 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -48,11 +48,50 @@ type GenieInterface interface { // Generate full query result download. // - // Initiate full SQL query result download and obtain a transient ID for - // tracking the download progress. This call initiates a new SQL execution to - // generate the query result. + // Initiate full SQL query result download and obtain a `download_id` to track + // the download progress. This call initiates a new SQL execution to generate + // the query result. The result is stored in an external link can be retrieved + // using the [Get Download Full Query + // Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks + // strongly recommends that you protect the URLs that are returned by the + // `EXTERNAL_LINKS` disposition. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) + // Get download full query result. + // + // After [Generating a Full Query Result + // Download](:method:genie/getdownloadfullqueryresult) and successfully + // receiving a `download_id`, use this API to Poll download progress and + // retrieve the SQL query result external link(s) upon completion. Warning: + // Databricks strongly recommends that you protect the URLs that are returned by + // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` + // disposition, a short-lived, presigned URL is generated, which can be used to + // download the results directly from Amazon S3. As a short-lived access + // credential is embedded in this presigned URL, you should protect the URL. + // Because presigned URLs are already generated with embedded temporary access + // credentials, you must not set an Authorization header in the download + // requests. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. 
+ GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) + + // Get download full query result. + // + // After [Generating a Full Query Result + // Download](:method:genie/generatedownloadfullqueryresult) and successfully + // receiving a `download_id`, use this API to poll download progress and + // retrieve the SQL query result external link(s) upon completion. Warning: + // Databricks strongly recommends that you protect the URLs that are returned by + // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` + // disposition, a short-lived, presigned URL is generated, which can be used to + // download the results directly from Amazon S3. As a short-lived access + // credential is embedded in this presigned URL, you should protect the URL. + // Because presigned URLs are already generated with embedded temporary access + // credentials, you must not set an Authorization header in the download + // requests. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. + GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) + // Get conversation message. // // Get message from conversation. @@ -254,6 +293,31 @@ func (a *GenieAPI) CreateMessageAndWait(ctx context.Context, genieCreateConversa return wait.Get() } +// Get download full query result. +// +// After [Generating a Full Query Result +// Download](:method:genie/generatedownloadfullqueryresult) and successfully +// receiving a `download_id`, use this API to poll download progress and +// retrieve the SQL query result external link(s) upon completion. Warning: +// Databricks strongly recommends that you protect the URLs that are returned by +// the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` +// disposition, a short-lived, presigned URL is generated, which can be used to +// download the results directly from Amazon S3. As a short-lived access +// credential is embedded in this presigned URL, you should protect the URL. +// Because presigned URLs are already generated with embedded temporary access +// credentials, you must not set an Authorization header in the download +// requests. See [Execute +// Statement](:method:statementexecution/executestatement) for more details. +func (a *GenieAPI) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) { + return a.genieImpl.GetDownloadFullQueryResult(ctx, GenieGetDownloadFullQueryResultRequest{ + SpaceId: spaceId, + ConversationId: conversationId, + MessageId: messageId, + AttachmentId: attachmentId, + DownloadId: downloadId, + }) +} + // Get conversation message. // // Get message from conversation. @@ -606,6 +670,30 @@ type LakeviewEmbeddedInterface interface { // // Get the current published dashboard within an embedded context. GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error + + // Read information about a published dashboard to mint an OAuth token. + // + // Get the required authorization details and scopes of a published dashboard to + // mint an OAuth token.
The `authorization_details` can be enriched to apply + // additional restrictions. + // + // Example: Adding the following `authorization_details` object to downscope the + // viewer permission to a specific table ``` { type: "unity_catalog_privileges", + // privileges: ["SELECT"], object_type: "TABLE", object_full_path: + // "main.default.testdata" } ``` + GetPublishedDashboardTokenInfo(ctx context.Context, request GetPublishedDashboardTokenInfoRequest) (*GetPublishedDashboardTokenInfoResponse, error) + + // Read information about a published dashboard to mint an OAuth token. + // + // Get the required authorization details and scopes of a published dashboard to + // mint an OAuth token. The `authorization_details` can be enriched to apply + // additional restrictions. + // + // Example: Adding the following `authorization_details` object to downscope the + // viewer permission to a specific table ``` { type: "unity_catalog_privileges", + // privileges: ["SELECT"], object_type: "TABLE", object_full_path: + // "main.default.testdata" } ``` + GetPublishedDashboardTokenInfoByDashboardId(ctx context.Context, dashboardId string) (*GetPublishedDashboardTokenInfoResponse, error) } func NewLakeviewEmbedded(client *client.DatabricksClient) *LakeviewEmbeddedAPI { @@ -630,6 +718,22 @@ func (a *LakeviewEmbeddedAPI) GetPublishedDashboardEmbeddedByDashboardId(ctx con }) } +// Read information about a published dashboard to mint an OAuth token. +// +// Get the required authorization details and scopes of a published dashboard to +// mint an OAuth token. The `authorization_details` can be enriched to apply +// additional restrictions. +// +// Example: Adding the following `authorization_details` object to downscope the +// viewer permission to a specific table ``` { type: "unity_catalog_privileges", +// privileges: ["SELECT"], object_type: "TABLE", object_full_path: +// "main.default.testdata" } ``` +func (a *LakeviewEmbeddedAPI) GetPublishedDashboardTokenInfoByDashboardId(ctx context.Context, dashboardId string) (*GetPublishedDashboardTokenInfoResponse, error) { + return a.lakeviewEmbeddedImpl.GetPublishedDashboardTokenInfo(ctx, GetPublishedDashboardTokenInfoRequest{ + DashboardId: dashboardId, + }) +} + type QueryExecutionInterface interface { // Cancel the results for a query for a published, embedded dashboard
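The two new Genie methods above pair up: `GenerateDownloadFullQueryResult` returns the `download_id` that the `GetDownloadFullQueryResult` variants then poll. Below is a minimal sketch of that flow through a workspace client. The IDs and the poll interval are placeholders, and reading `ExternalLinks` off `sql.ResultData` assumes the standard Statement Execution response shape rather than anything introduced in this change:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/dashboards"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient() // picks up auth from the environment
	if err != nil {
		panic(err)
	}

	// Step 1: initiate the download and obtain a download_id.
	gen, err := w.Genie.GenerateDownloadFullQueryResult(ctx, dashboards.GenieGenerateDownloadFullQueryResultRequest{
		SpaceId:        "space-id", // hypothetical IDs throughout
		ConversationId: "conversation-id",
		MessageId:      "message-id",
		AttachmentId:   "attachment-id",
	})
	if err != nil {
		panic(err)
	}

	// Step 2: poll with the download_id until external links are available.
	for {
		res, err := w.Genie.GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(
			ctx, "space-id", "conversation-id", "message-id", "attachment-id", gen.DownloadId)
		if err != nil {
			panic(err)
		}
		if sr := res.StatementResponse; sr != nil && sr.Result != nil {
			// Presigned URLs: fetch them directly, without an Authorization header.
			for _, link := range sr.Result.ExternalLinks {
				fmt.Println(link.ExternalLink)
			}
			return
		}
		time.Sleep(5 * time.Second) // illustrative backoff
	}
}
```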
diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index 0fc08d294..da4987e7e 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -50,7 +50,7 @@ func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecut func (a *genieImpl) GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) { var genieGenerateDownloadFullQueryResultResponse GenieGenerateDownloadFullQueryResultResponse - path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/generate-download", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) + path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/downloads", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -58,6 +58,16 @@ func (a *genieImpl) GenerateDownloadFullQueryResult(ctx context.Context, request return &genieGenerateDownloadFullQueryResultResponse, err } +func (a *genieImpl) GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) { + var genieGetDownloadFullQueryResultResponse GenieGetDownloadFullQueryResultResponse + path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/downloads/%v", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId, request.DownloadId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieGetDownloadFullQueryResultResponse) + return &genieGetDownloadFullQueryResultResponse, err +} + func (a *genieImpl) GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) { var genieMessage GenieMessage path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v", request.SpaceId, request.ConversationId, request.MessageId) @@ -419,6 +429,16 @@ func (a *lakeviewEmbeddedImpl) GetPublishedDashboardEmbedded(ctx context.Context return err } +func (a *lakeviewEmbeddedImpl) GetPublishedDashboardTokenInfo(ctx context.Context, request GetPublishedDashboardTokenInfoRequest) (*GetPublishedDashboardTokenInfoResponse, error) { + var getPublishedDashboardTokenInfoResponse GetPublishedDashboardTokenInfoResponse + path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/published/tokeninfo", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedDashboardTokenInfoResponse) + return &getPublishedDashboardTokenInfoResponse, err +} + // unexported type that holds implementations of just QueryExecution API methods type queryExecutionImpl struct { client *client.DatabricksClient diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index 983d37125..0df971d5f 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -33,11 +33,33 @@ type GenieService interface { // Generate full query result download. 
// - // Initiate full SQL query result download and obtain a transient ID for - // tracking the download progress. This call initiates a new SQL execution - // to generate the query result. + // Initiate full SQL query result download and obtain a `download_id` to + // track the download progress. This call initiates a new SQL execution to + // generate the query result. The result is stored in an external link and can + // be retrieved using the [Get Download Full Query + // Result](:method:genie/getdownloadfullqueryresult) API. Warning: + // Databricks strongly recommends that you protect the URLs that are + // returned by the `EXTERNAL_LINKS` disposition. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) + // Get download full query result. + // + // After [Generating a Full Query Result + // Download](:method:genie/generatedownloadfullqueryresult) and successfully + // receiving a `download_id`, use this API to poll download progress and + // retrieve the SQL query result external link(s) upon completion. Warning: + // Databricks strongly recommends that you protect the URLs that are + // returned by the `EXTERNAL_LINKS` disposition. When you use the + // `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, + // which can be used to download the results directly from Amazon S3. As a + // short-lived access credential is embedded in this presigned URL, you + // should protect the URL. Because presigned URLs are already generated with + // embedded temporary access credentials, you must not set an Authorization + // header in the download requests. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. + GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) + // Get conversation message. // // Get message from conversation. @@ -164,6 +186,18 @@ type LakeviewEmbeddedService interface { // // Get the current published dashboard within an embedded context. GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error + + // Read information about a published dashboard to mint an OAuth token. + // + // Get the required authorization details and scopes of a published dashboard + // to mint an OAuth token. The `authorization_details` can be enriched to + // apply additional restrictions. + // + // Example: Adding the following `authorization_details` object to downscope + // the viewer permission to a specific table ``` { type: + // "unity_catalog_privileges", privileges: ["SELECT"], object_type: "TABLE", + // object_full_path: "main.default.testdata" } ``` + GetPublishedDashboardTokenInfo(ctx context.Context, request GetPublishedDashboardTokenInfoRequest) (*GetPublishedDashboardTokenInfoResponse, error) } // Query execution APIs for AI / BI Dashboards diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 3482b74a0..b9b9b168a 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -9,6 +9,48 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" ) +type AuthorizationDetails struct { + // Represents downscoped permission rules with specific access rights. This + // field is specific to `workspace_rule_set` constraint.
+ GrantRules []AuthorizationDetailsGrantRule `json:"grant_rules,omitempty"` + // The ACL path of the tree store resource. + ResourceLegacyAclPath string `json:"resource_legacy_acl_path,omitempty"` + // The resource name to which the authorization rule applies. This field is + // specific to `workspace_rule_set` constraint. Format: + // `workspaces/{workspace_id}/dashboards/{dashboard_id}` + ResourceName string `json:"resource_name,omitempty"` + // The type of authorization downscoping policy. Ex: `workspace_rule_set` + // defines access rules for a specific workspace resource + Type string `json:"type,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AuthorizationDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AuthorizationDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AuthorizationDetailsGrantRule struct { + // Permission sets for dashboards are defined in + // iam-common/rbac-common/permission-sets/definitions/TreeStoreBasePermissionSets + // Ex: `permissionSets/dashboard.runner` + PermissionSet string `json:"permission_set,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AuthorizationDetailsGrantRule) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AuthorizationDetailsGrantRule) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Cancel the results for a query for a published, embedded dashboard type CancelPublishedQueryExecutionRequest struct { DashboardName string `json:"-" url:"dashboard_name"` @@ -317,13 +359,9 @@ type GenieGenerateDownloadFullQueryResultRequest struct { } type GenieGenerateDownloadFullQueryResultResponse struct { - // Error message if Genie failed to download the result - Error string `json:"error,omitempty"` - // Download result status - Status MessageStatus `json:"status,omitempty"` - // Transient Statement ID. Use this ID to track the download request in - // subsequent polling calls - TransientStatementId string `json:"transient_statement_id,omitempty"` + // Download ID. Use this ID to track the download request in subsequent + // polling calls + DownloadId string `json:"download_id,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -348,6 +386,27 @@ type GenieGetConversationMessageRequest struct { SpaceId string `json:"-" url:"-"` } +// Get download full query result +type GenieGetDownloadFullQueryResultRequest struct { + // Attachment ID + AttachmentId string `json:"-" url:"-"` + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Download ID. This ID is provided by the [Generate Download + // endpoint](:method:genie/generateDownloadFullQueryResult) + DownloadId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Space ID + SpaceId string `json:"-" url:"-"` +} + +type GenieGetDownloadFullQueryResultResponse struct { + // SQL Statement Execution response. See [Get status, manifest, and result + // first chunk](:method:statementexecution/getstatement) for more details. + StatementResponse *sql.StatementResponse `json:"statement_response,omitempty"` +} + // Get message attachment SQL query result type GenieGetMessageAttachmentQueryResultRequest struct { // Attachment ID @@ -553,6 +612,49 @@ type GetPublishedDashboardRequest struct { DashboardId string `json:"-" url:"-"` } +// Read information about a published dashboard to mint an OAuth token.
+type GetPublishedDashboardTokenInfoRequest struct { + // UUID identifying the published dashboard. + DashboardId string `json:"-" url:"-"` + // Provided external value to be included in the custom claim. + ExternalValue string `json:"-" url:"external_value,omitempty"` + // Provided external viewer id to be included in the custom claim. + ExternalViewerId string `json:"-" url:"external_viewer_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GetPublishedDashboardTokenInfoRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPublishedDashboardTokenInfoRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetPublishedDashboardTokenInfoResponse struct { + // Authorization constraints for accessing the published dashboard. + // Currently includes `workspace_rule_set` and could be enriched with + // `unity_catalog_privileges` before OAuth token generation. + AuthorizationDetails []AuthorizationDetails `json:"authorization_details,omitempty"` + // Custom claim generated from external_value and external_viewer_id. + // Format: + // `urn:aibi:external_data:<external_value>:<external_viewer_id>:<dashboard_id>` + CustomClaim string `json:"custom_claim,omitempty"` + // Scope defining access permissions. + Scope string `json:"scope,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GetPublishedDashboardTokenInfoResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPublishedDashboardTokenInfoResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get dashboard schedule type GetScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. diff --git a/service/jobs/api.go b/service/jobs/api.go index bbb59f854..1eb5da1c1 100755 --- a/service/jobs/api.go +++ b/service/jobs/api.go @@ -82,22 +82,28 @@ type JobsInterface interface { // // Retrieves the details for a single job. // - // In Jobs API 2.2, requests for a single job support pagination of `tasks` and - // `job_clusters` when either exceeds 100 elements. Use the `next_page_token` - // field to check for more results and pass its value as the `page_token` in - // subsequent requests. Arrays with fewer than 100 elements in a page will be - // empty on later pages. + // Large arrays in the results will be paginated when they exceed 100 elements. + // A request for a single job will return all properties for that job, and the + // first 100 elements of array properties (`tasks`, `job_clusters`, + // `environments` and `parameters`). Use the `next_page_token` field to check + // for more results and pass its value as the `page_token` in subsequent + // requests. If any array properties have more than 100 elements, additional + // results will be returned on subsequent requests. Arrays without additional + // results will be empty on later pages. Get(ctx context.Context, request GetJobRequest) (*Job, error) // Get a single job. // // Retrieves the details for a single job. // - // In Jobs API 2.2, requests for a single job support pagination of `tasks` and - // `job_clusters` when either exceeds 100 elements. Use the `next_page_token` - // field to check for more results and pass its value as the `page_token` in - // subsequent requests. Arrays with fewer than 100 elements in a page will be - // empty on later pages. + // Large arrays in the results will be paginated when they exceed 100 elements.
+ // A request for a single job will return all properties for that job, and the + // first 100 elements of array properties (`tasks`, `job_clusters`, + // `environments` and `parameters`). Use the `next_page_token` field to check + // for more results and pass its value as the `page_token` in subsequent + // requests. If any array properties have more than 100 elements, additional + // results will be returned on subsequent requests. Arrays without additional + // results will be empty on later pages. GetByJobId(ctx context.Context, jobId int64) (*Job, error) // Get job permission levels. @@ -126,11 +132,14 @@ type JobsInterface interface { // // Retrieves the metadata of a run. // - // In Jobs API 2.2, requests for a single job run support pagination of `tasks` - // and `job_clusters` when either exceeds 100 elements. Use the - // `next_page_token` field to check for more results and pass its value as the - // `page_token` in subsequent requests. Arrays with fewer than 100 elements in a - // page will be empty on later pages. + // Large arrays in the results will be paginated when they exceed 100 elements. + // A request for a single run will return all properties for that run, and the + // first 100 elements of array properties (`tasks`, `job_clusters`, + // `job_parameters` and `repair_history`). Use the next_page_token field to + // check for more results and pass its value as the page_token in subsequent + // requests. If any array properties have more than 100 elements, additional + // results will be returned on subsequent requests. Arrays without additional + // results will be empty on later pages. GetRun(ctx context.Context, request GetRunRequest) (*Run, error) // Get the output for a single run. @@ -449,11 +458,14 @@ func (a *JobsAPI) DeleteRunByRunId(ctx context.Context, runId int64) error { // // Retrieves the details for a single job. // -// In Jobs API 2.2, requests for a single job support pagination of `tasks` and -// `job_clusters` when either exceeds 100 elements. Use the `next_page_token` -// field to check for more results and pass its value as the `page_token` in -// subsequent requests. Arrays with fewer than 100 elements in a page will be -// empty on later pages. +// Large arrays in the results will be paginated when they exceed 100 elements. +// A request for a single job will return all properties for that job, and the +// first 100 elements of array properties (`tasks`, `job_clusters`, +// `environments` and `parameters`). Use the `next_page_token` field to check +// for more results and pass its value as the `page_token` in subsequent +// requests. If any array properties have more than 100 elements, additional +// results will be returned on subsequent requests. Arrays without additional +// results will be empty on later pages. func (a *JobsAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, error) { return a.jobsImpl.Get(ctx, GetJobRequest{ JobId: jobId, diff --git a/service/jobs/interface.go b/service/jobs/interface.go index be9b5518d..a012a92b7 100755 --- a/service/jobs/interface.go +++ b/service/jobs/interface.go @@ -62,11 +62,14 @@ type JobsService interface { // // Retrieves the details for a single job. // - // In Jobs API 2.2, requests for a single job support pagination of `tasks` - // and `job_clusters` when either exceeds 100 elements. Use the + // Large arrays in the results will be paginated when they exceed 100 + // elements. 
A request for a single job will return all properties for that + // job, and the first 100 elements of array properties (`tasks`, + // `job_clusters`, `environments` and `parameters`). Use the // `next_page_token` field to check for more results and pass its value as - // the `page_token` in subsequent requests. Arrays with fewer than 100 - // elements in a page will be empty on later pages. + // the `page_token` in subsequent requests. If any array properties have + // more than 100 elements, additional results will be returned on subsequent + // requests. Arrays without additional results will be empty on later pages. Get(ctx context.Context, request GetJobRequest) (*Job, error) // Get job permission levels. @@ -84,11 +87,14 @@ type JobsService interface { // // Retrieves the metadata of a run. // - // In Jobs API 2.2, requests for a single job run support pagination of - // `tasks` and `job_clusters` when either exceeds 100 elements. Use the - // `next_page_token` field to check for more results and pass its value as - // the `page_token` in subsequent requests. Arrays with fewer than 100 - // elements in a page will be empty on later pages. + // Large arrays in the results will be paginated when they exceed 100 + // elements. A request for a single run will return all properties for that + // run, and the first 100 elements of array properties (`tasks`, + // `job_clusters`, `job_parameters` and `repair_history`). Use the + // next_page_token field to check for more results and pass its value as the + // page_token in subsequent requests. If any array properties have more than + // 100 elements, additional results will be returned on subsequent requests. + // Arrays without additional results will be empty on later pages. GetRun(ctx context.Context, request GetRunRequest) (*Run, error) // Get the output for a single run. diff --git a/service/jobs/model.go b/service/jobs/model.go index 4272ce10f..eb7628933 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -9,6 +9,33 @@ import ( "github.com/databricks/databricks-sdk-go/service/compute" ) +type AuthenticationMethod string + +const AuthenticationMethodOauth AuthenticationMethod = `OAUTH` + +const AuthenticationMethodPat AuthenticationMethod = `PAT` + +// String representation for [fmt.Print] +func (f *AuthenticationMethod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AuthenticationMethod) Set(v string) error { + switch v { + case `OAUTH`, `PAT`: + *f = AuthenticationMethod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OAUTH", "PAT"`, v) + } +} + +// Type always returns AuthenticationMethod to satisfy [pflag.Value] interface +func (f *AuthenticationMethod) Type() string { + return "AuthenticationMethod" +} + type BaseJob struct { // The time at which this job was created in epoch milliseconds // (milliseconds since 1/1/1970 UTC). @@ -23,7 +50,7 @@ type BaseJob struct { // based on accessible budget policies of the run_as identity on job // creation or modification. EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` - // Indicates if the job has more sub-resources (`tasks`, `job_clusters`) + // Indicates if the job has more array properties (`tasks`, `job_clusters`) // that are not shown. They can be accessed via :method:jobs/get endpoint. // It is only relevant for API 2.2 :method:jobs/list requests with // `expand_tasks=true`. 
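The pagination contract documented above can be exercised end to end. The following is a minimal sketch (illustrative, not part of this change) of collecting every element of a job's `tasks` array across pages, assuming a configured workspace client; the job ID is a hypothetical placeholder:

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/jobs"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	const jobID = int64(123) // hypothetical job ID
	var tasks []jobs.Task
	req := jobs.GetJobRequest{JobId: jobID}
	for {
		job, err := w.Jobs.Get(ctx, req)
		if err != nil {
			log.Fatal(err)
		}
		if job.Settings != nil {
			// Each page carries at most 100 elements per array property;
			// arrays with no further results come back empty on later pages.
			tasks = append(tasks, job.Settings.Tasks...)
		}
		if job.NextPageToken == "" {
			break // no more pages of array properties
		}
		req.PageToken = job.NextPageToken
	}
	log.Printf("job %d has %d tasks", jobID, len(tasks))
}
```

The same loop shape applies to `job_clusters`, `environments` and `parameters` on jobs, and to the run-level array properties paginated by `GetRun`.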
@@ -72,10 +99,14 @@ type BaseRun struct {
 	CreatorUserName string `json:"creator_user_name,omitempty"`
 	// Description of the run
 	Description string `json:"description,omitempty"`
-	// effective_performance_target is the actual performance target used by the
-	// run during execution. effective_performance_target can differ from the
-	// client-set performance_target depending on if the job was eligible to be
-	// cost-optimized.
+	// The actual performance target used by the serverless run during
+	// execution. This can differ from the client-set performance target on the
+	// request depending on whether the performance mode is supported by the job
+	// type.
+	//
+	// * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+	// `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times
+	// through rapid scaling and optimized cluster performance.
 	EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"`
 	// The time at which this run ended in epoch milliseconds (milliseconds
 	// since 1/1/1970 UTC). This field is set to 0 if the job is still running.
@@ -99,7 +130,7 @@ type BaseRun struct {
 	// Note: dbt and SQL File tasks support only version-controlled sources. If
 	// dbt or SQL File tasks are used, `git_source` must be defined on the job.
 	GitSource *GitSource `json:"git_source,omitempty"`
-	// Indicates if the run has more sub-resources (`tasks`, `job_clusters`)
+	// Indicates if the run has more array properties (`tasks`, `job_clusters`)
 	// that are not shown. They can be accessed via :method:jobs/getrun
 	// endpoint. It is only relevant for API 2.2 :method:jobs/listruns requests
 	// with `expand_tasks=true`.
@@ -427,7 +458,6 @@ func (s ClusterSpec) MarshalJSON() ([]byte, error) {
 	return marshal.Marshal(s)
 }

-// Next field: 4
 type ComputeConfig struct {
 	// ID of the GPU pool to use.
 	GpuNodePoolId string `json:"gpu_node_pool_id"`
@@ -596,9 +626,7 @@ type CreateJob struct {
 	Health *JobsHealthRules `json:"health,omitempty"`
 	// A list of job cluster specifications that can be shared and reused by
 	// tasks of this job. Libraries cannot be declared in a shared job cluster.
-	// You must declare dependent libraries in task settings. If more than 100
-	// job clusters are available, you can paginate through them using
-	// :method:jobs/get.
+	// You must declare dependent libraries in task settings.
 	JobClusters []JobCluster `json:"job_clusters,omitempty"`
 	// An optional maximum allowed number of concurrent runs of the job. Set
 	// this value if you want to be able to execute multiple runs of the same
@@ -621,8 +649,13 @@ type CreateJob struct {
 	NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"`
 	// Job-level parameter definitions
 	Parameters []JobParameterDefinition `json:"parameters,omitempty"`
-	// PerformanceTarget defines how performant or cost efficient the execution
-	// of run on serverless should be.
+	// The performance mode on a serverless job. The performance target
+	// determines the level of compute performance or cost-efficiency for the
+	// run.
+	//
+	// * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+	// `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times
+	// through rapid scaling and optimized cluster performance.
 	PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"`
 	// The queue settings of the job.
Queue *QueueSettings `json:"queue,omitempty"` @@ -641,10 +674,13 @@ type CreateJob struct { // limitations as cluster tags. A maximum of 25 tags can be added to the // job. Tags map[string]string `json:"tags,omitempty"` - // A list of task specifications to be executed by this job. If more than - // 100 tasks are available, you can paginate through them using - // :method:jobs/get. Use the `next_page_token` field at the object root to - // determine if more results are available. + // A list of task specifications to be executed by this job. It supports up + // to 1000 elements in write endpoints (:method:jobs/create, + // :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read + // endpoints return only 100 tasks. If more than 100 tasks are available, + // you can paginate through them using :method:jobs/get. Use the + // `next_page_token` field at the object root to determine if more results + // are available. Tasks []Task `json:"tasks,omitempty"` // An optional timeout applied to each run of this job. A value of `0` means // no timeout. @@ -699,6 +735,46 @@ type CronSchedule struct { TimezoneId string `json:"timezone_id"` } +type DashboardPageSnapshot struct { + PageDisplayName string `json:"page_display_name,omitempty"` + + WidgetErrorDetails []WidgetErrorDetail `json:"widget_error_details,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DashboardPageSnapshot) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DashboardPageSnapshot) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Configures the Lakeview Dashboard job task type. +type DashboardTask struct { + DashboardId string `json:"dashboard_id,omitempty"` + + Subscription *Subscription `json:"subscription,omitempty"` + // The warehouse id to execute the dashboard with for the schedule + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DashboardTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DashboardTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DashboardTaskOutput struct { + // Should only be populated for manual PDF download jobs. + PageSnapshots []DashboardPageSnapshot `json:"page_snapshots,omitempty"` +} + type DbtOutput struct { // An optional map of headers to send when retrieving the artifact from the // `artifacts_link`. @@ -1000,11 +1076,10 @@ func (f *Format) Type() string { return "Format" } -// Next field: 9 type GenAiComputeTask struct { // Command launcher to run the actual script, e.g. bash, python etc. Command string `json:"command,omitempty"` - // Next field: 4 + Compute *ComputeConfig `json:"compute,omitempty"` // Runtime image DlRuntimeImage string `json:"dl_runtime_image"` @@ -1067,8 +1142,8 @@ type GetJobRequest struct { // The canonical identifier of the job to retrieve information about. This // field is required. JobId int64 `json:"-" url:"job_id"` - // Use `next_page_token` returned from the previous GetJob to request the - // next page of the job's sub-resources. + // Use `next_page_token` returned from the previous GetJob response to + // request the next page of the job's array properties. PageToken string `json:"-" url:"page_token,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1124,8 +1199,8 @@ type GetRunRequest struct { IncludeHistory bool `json:"-" url:"include_history,omitempty"` // Whether to include resolved parameter values in the response. 
IncludeResolvedValues bool `json:"-" url:"include_resolved_values,omitempty"` - // Use `next_page_token` returned from the previous GetRun to request the - // next page of the run's sub-resources. + // Use `next_page_token` returned from the previous GetRun response to + // request the next page of the run's array properties. PageToken string `json:"-" url:"page_token,omitempty"` // The canonical identifier of the run for which to retrieve the metadata. // This field is required. @@ -1258,14 +1333,14 @@ type Job struct { // based on accessible budget policies of the run_as identity on job // creation or modification. EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` - // Indicates if the job has more sub-resources (`tasks`, `job_clusters`) + // Indicates if the job has more array properties (`tasks`, `job_clusters`) // that are not shown. They can be accessed via :method:jobs/get endpoint. // It is only relevant for API 2.2 :method:jobs/list requests with // `expand_tasks=true`. HasMore bool `json:"has_more,omitempty"` // The canonical identifier for this job. JobId int64 `json:"job_id,omitempty"` - // A token that can be used to list the next page of sub-resources. + // A token that can be used to list the next page of array properties. NextPageToken string `json:"next_page_token,omitempty"` // The email of an active workspace user or the application ID of a service // principal that the job runs as. This value can be changed by setting the @@ -1709,9 +1784,7 @@ type JobSettings struct { Health *JobsHealthRules `json:"health,omitempty"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. - // You must declare dependent libraries in task settings. If more than 100 - // job clusters are available, you can paginate through them using - // :method:jobs/get. + // You must declare dependent libraries in task settings. JobClusters []JobCluster `json:"job_clusters,omitempty"` // An optional maximum allowed number of concurrent runs of the job. Set // this value if you want to be able to execute multiple runs of the same @@ -1734,8 +1807,13 @@ type JobSettings struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` - // PerformanceTarget defines how performant or cost efficient the execution - // of run on serverless should be. + // The performance mode on a serverless job. The performance target + // determines the level of compute performance or cost-efficiency for the + // run. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // The queue settings of the job. Queue *QueueSettings `json:"queue,omitempty"` @@ -1754,10 +1832,13 @@ type JobSettings struct { // limitations as cluster tags. A maximum of 25 tags can be added to the // job. Tags map[string]string `json:"tags,omitempty"` - // A list of task specifications to be executed by this job. If more than - // 100 tasks are available, you can paginate through them using - // :method:jobs/get. Use the `next_page_token` field at the object root to - // determine if more results are available. 
+ // A list of task specifications to be executed by this job. It supports up + // to 1000 elements in write endpoints (:method:jobs/create, + // :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read + // endpoints return only 100 tasks. If more than 100 tasks are available, + // you can paginate through them using :method:jobs/get. Use the + // `next_page_token` field at the object root to determine if more results + // are available. Tasks []Task `json:"tasks,omitempty"` // An optional timeout applied to each run of this job. A value of `0` means // no timeout. @@ -1996,9 +2077,9 @@ func (s ListJobComplianceRequest) MarshalJSON() ([]byte, error) { // List jobs type ListJobsRequest struct { - // Whether to include task and cluster details in the response. Note that in - // API 2.2, only the first 100 elements will be shown. Use :method:jobs/get - // to paginate through all tasks and clusters. + // Whether to include task and cluster details in the response. Note that + // only the first 100 elements will be shown. Use :method:jobs/get to + // paginate through all tasks and clusters. ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"` // The number of jobs to return. This value must be greater than 0 and less // or equal to 100. The default value is 20. @@ -2060,9 +2141,9 @@ type ListRunsRequest struct { // results; otherwise, lists both active and completed runs. This field // cannot be `true` when active_only is `true`. CompletedOnly bool `json:"-" url:"completed_only,omitempty"` - // Whether to include task and cluster details in the response. Note that in - // API 2.2, only the first 100 elements will be shown. Use - // :method:jobs/getrun to paginate through all tasks and clusters. + // Whether to include task and cluster details in the response. Note that + // only the first 100 elements will be shown. Use :method:jobs/getrun to + // paginate through all tasks and clusters. ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"` // The job for which to list runs. If omitted, the Jobs service lists runs // from all jobs. @@ -2253,12 +2334,10 @@ func (f *PauseStatus) Type() string { // Cluster Manager (see cluster-common PerformanceTarget). 
type PerformanceTarget string -const PerformanceTargetBalanced PerformanceTarget = `BALANCED` - -const PerformanceTargetCostOptimized PerformanceTarget = `COST_OPTIMIZED` - const PerformanceTargetPerformanceOptimized PerformanceTarget = `PERFORMANCE_OPTIMIZED` +const PerformanceTargetStandard PerformanceTarget = `STANDARD` + // String representation for [fmt.Print] func (f *PerformanceTarget) String() string { return string(*f) @@ -2267,11 +2346,11 @@ func (f *PerformanceTarget) String() string { // Set raw string value and validate it against allowed values func (f *PerformanceTarget) Set(v string) error { switch v { - case `BALANCED`, `COST_OPTIMIZED`, `PERFORMANCE_OPTIMIZED`: + case `PERFORMANCE_OPTIMIZED`, `STANDARD`: *f = PerformanceTarget(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BALANCED", "COST_OPTIMIZED", "PERFORMANCE_OPTIMIZED"`, v) + return fmt.Errorf(`value "%s" is not one of "PERFORMANCE_OPTIMIZED", "STANDARD"`, v) } } @@ -2348,6 +2427,74 @@ func (s PipelineTask) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type PowerBiModel struct { + // How the published Power BI model authenticates to Databricks + AuthenticationMethod AuthenticationMethod `json:"authentication_method,omitempty"` + // The name of the Power BI model + ModelName string `json:"model_name,omitempty"` + // Whether to overwrite existing Power BI models + OverwriteExisting bool `json:"overwrite_existing,omitempty"` + // The default storage mode of the Power BI model + StorageMode StorageMode `json:"storage_mode,omitempty"` + // The name of the Power BI workspace of the model + WorkspaceName string `json:"workspace_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PowerBiModel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PowerBiModel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PowerBiTable struct { + // The catalog name in Databricks + Catalog string `json:"catalog,omitempty"` + // The table name in Databricks + Name string `json:"name,omitempty"` + // The schema name in Databricks + Schema string `json:"schema,omitempty"` + // The Power BI storage mode of the table + StorageMode StorageMode `json:"storage_mode,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PowerBiTable) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PowerBiTable) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PowerBiTask struct { + // The resource name of the UC connection to authenticate from Databricks to + // Power BI + ConnectionResourceName string `json:"connection_resource_name,omitempty"` + // The semantic model to update + PowerBiModel *PowerBiModel `json:"power_bi_model,omitempty"` + // Whether the model should be refreshed after the update + RefreshAfterUpdate bool `json:"refresh_after_update,omitempty"` + // The tables to be exported to Power BI + Tables []PowerBiTable `json:"tables,omitempty"` + // The SQL warehouse ID to use as the Power BI data source + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PowerBiTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PowerBiTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type PythonWheelTask struct { // Named entry point to use, if it does not exist in the metadata of the // package it executes the function from the package directly using @@ -2727,10 
+2874,14 @@ type Run struct { CreatorUserName string `json:"creator_user_name,omitempty"` // Description of the run Description string `json:"description,omitempty"` - // effective_performance_target is the actual performance target used by the - // run during execution. effective_performance_target can differ from the - // client-set performance_target depending on if the job was eligible to be - // cost-optimized. + // The actual performance target used by the serverless run during + // execution. This can differ from the client-set performance target on the + // request depending on whether the performance mode is supported by the job + // type. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. @@ -2754,7 +2905,7 @@ type Run struct { // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` - // Indicates if the run has more sub-resources (`tasks`, `job_clusters`) + // Indicates if the run has more array properties (`tasks`, `job_clusters`) // that are not shown. They can be accessed via :method:jobs/getrun // endpoint. It is only relevant for API 2.2 :method:jobs/listruns requests // with `expand_tasks=true`. @@ -2777,7 +2928,7 @@ type Run struct { // field is populated with the ID of the job run that the task run belongs // to. JobRunId int64 `json:"job_run_id,omitempty"` - // A token that can be used to list the next page of sub-resources. + // A token that can be used to list the next page of array properties. NextPageToken string `json:"next_page_token,omitempty"` // A unique identifier for this job run. This is set to the same value as // `run_id`. @@ -3245,9 +3396,14 @@ type RunNow struct { // A list of task keys to run inside of the job. If this field is not // provided, all tasks in the job will be run. Only []string `json:"only,omitempty"` - // PerformanceTarget defines how performant or cost efficient the execution - // of run on serverless compute should be. For RunNow, this performance - // target will override the target defined on the job-level. + // The performance mode on a serverless job. The performance target + // determines the level of compute performance or cost-efficiency for the + // run. This field overrides the performance target defined on the job + // level. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. 
 	PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"`
 	// Controls whether the pipeline should perform a full refresh
 	PipelineParams *PipelineParams `json:"pipeline_params,omitempty"`
@@ -3331,6 +3487,8 @@ func (s RunNowResponse) MarshalJSON() ([]byte, error) {
 type RunOutput struct {
 	// The output of a clean rooms notebook task, if available
 	CleanRoomsNotebookOutput *CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput `json:"clean_rooms_notebook_output,omitempty"`
+	// The output of a dashboard task, if available
+	DashboardOutput *DashboardTaskOutput `json:"dashboard_output,omitempty"`
 	// The output of a dbt task, if available.
 	DbtOutput *DbtOutput `json:"dbt_output,omitempty"`
 	// An error message indicating why a task failed or why output is not
@@ -3599,6 +3757,8 @@ type RunTask struct {
 	// task does not require a cluster to execute and does not support retries
 	// or notifications.
 	ConditionTask *RunConditionTask `json:"condition_task,omitempty"`
+	// The task runs a DashboardTask when the `dashboard_task` field is present.
+	DashboardTask *DashboardTask `json:"dashboard_task,omitempty"`
 	// The task runs one or more dbt commands when the `dbt_task` field is
 	// present. The dbt task requires both Databricks SQL and the ability to use
 	// a serverless or a pro SQL warehouse.
@@ -3610,13 +3770,16 @@ type RunTask struct {
 	DependsOn []TaskDependency `json:"depends_on,omitempty"`
 	// An optional description for this task.
 	Description string `json:"description,omitempty"`
-	// Denotes whether or not the task was disabled by the user. Disabled tasks
-	// do not execute and are immediately skipped as soon as they are unblocked.
+	// Deprecated, field was never used in production.
 	Disabled bool `json:"disabled,omitempty"`
-	// effective_performance_target is the actual performance target used by the
-	// run during execution. effective_performance_target can differ from the
-	// client-set performance_target depending on if the job was eligible to be
-	// cost-optimized.
+	// The actual performance target used by the serverless run during
+	// execution. This can differ from the client-set performance target on the
+	// request depending on whether the performance mode is supported by the job
+	// type.
+	//
+	// * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+	// `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times
+	// through rapid scaling and optimized cluster performance.
 	EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"`
 	// An optional set of email addresses notified when the task run begins or
 	// completes. The default behavior is to not send any emails.
@@ -3644,7 +3807,7 @@ type RunTask struct {
 	// The task executes a nested task for every input provided when the
 	// `for_each_task` field is present.
 	ForEachTask *RunForEachTask `json:"for_each_task,omitempty"`
-	// Next field: 9
+
 	GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"`
 	// An optional specification for a remote Git repository containing the
 	// source code used by tasks. Version-controlled source code is supported by
@@ -3673,6 +3836,9 @@ type RunTask struct {
 	// The task triggers a pipeline update when the `pipeline_task` field is
 	// present. Only pipelines configured to use triggered mode are supported.
 	PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
+	// The task triggers a Power BI semantic model update when the
+	// `power_bi_task` field is present.
+	PowerBiTask *PowerBiTask `json:"power_bi_task,omitempty"`
 	// The task runs a Python wheel when the `python_wheel_task` field is
 	// present.
 	PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
@@ -4210,6 +4376,35 @@ func (s SqlTaskSubscription) MarshalJSON() ([]byte, error) {
 	return marshal.Marshal(s)
 }

+type StorageMode string
+
+const StorageModeDirectQuery StorageMode = `DIRECT_QUERY`
+
+const StorageModeDual StorageMode = `DUAL`
+
+const StorageModeImport StorageMode = `IMPORT`
+
+// String representation for [fmt.Print]
+func (f *StorageMode) String() string {
+	return string(*f)
+}
+
+// Set raw string value and validate it against allowed values
+func (f *StorageMode) Set(v string) error {
+	switch v {
+	case `DIRECT_QUERY`, `DUAL`, `IMPORT`:
+		*f = StorageMode(v)
+		return nil
+	default:
+		return fmt.Errorf(`value "%s" is not one of "DIRECT_QUERY", "DUAL", "IMPORT"`, v)
+	}
+}
+
+// Type always returns StorageMode to satisfy [pflag.Value] interface
+func (f *StorageMode) Type() string {
+	return "StorageMode"
+}
+
 type SubmitRun struct {
 	// List of permissions to set on the job.
 	AccessControlList []JobAccessControlRequest `json:"access_control_list,omitempty"`
@@ -4309,6 +4504,8 @@ type SubmitTask struct {
 	// task does not require a cluster to execute and does not support retries
 	// or notifications.
 	ConditionTask *ConditionTask `json:"condition_task,omitempty"`
+	// The task runs a DashboardTask when the `dashboard_task` field is present.
+	DashboardTask *DashboardTask `json:"dashboard_task,omitempty"`
 	// The task runs one or more dbt commands when the `dbt_task` field is
 	// present. The dbt task requires both Databricks SQL and the ability to use
 	// a serverless or a pro SQL warehouse.
@@ -4335,7 +4532,7 @@ type SubmitTask struct {
 	// The task executes a nested task for every input provided when the
 	// `for_each_task` field is present.
 	ForEachTask *ForEachTask `json:"for_each_task,omitempty"`
-	// Next field: 9
+
 	GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"`
 	// An optional set of health rules that can be defined for this job.
 	Health *JobsHealthRules `json:"health,omitempty"`
@@ -4354,6 +4551,9 @@ type SubmitTask struct {
 	// The task triggers a pipeline update when the `pipeline_task` field is
 	// present. Only pipelines configured to use triggered mode are supported.
 	PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
+	// The task triggers a Power BI semantic model update when the
+	// `power_bi_task` field is present.
+	PowerBiTask *PowerBiTask `json:"power_bi_task,omitempty"`
 	// The task runs a Python wheel when the `python_wheel_task` field is
 	// present.
 	PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
@@ -4415,6 +4615,42 @@ func (s SubmitTask) MarshalJSON() ([]byte, error) {
 	return marshal.Marshal(s)
 }

+type Subscription struct {
+	// Optional: Allows users to specify a custom subject line on the email sent
+	// to subscribers.
+	CustomSubject string `json:"custom_subject,omitempty"`
+	// When true, the subscription will not send emails.
+	Paused bool `json:"paused,omitempty"`
+
+	Subscribers []SubscriptionSubscriber `json:"subscribers,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *Subscription) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s Subscription) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
+type SubscriptionSubscriber struct {
+	DestinationId string `json:"destination_id,omitempty"`
+
+	UserName string `json:"user_name,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *SubscriptionSubscriber) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s SubscriptionSubscriber) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
+
 type TableUpdateTriggerConfiguration struct {
 	// The table(s) condition based on which to trigger a job run.
 	Condition Condition `json:"condition,omitempty"`
@@ -4453,6 +4689,8 @@ type Task struct {
 	// task does not require a cluster to execute and does not support retries
 	// or notifications.
 	ConditionTask *ConditionTask `json:"condition_task,omitempty"`
+	// The task runs a DashboardTask when the `dashboard_task` field is present.
+	DashboardTask *DashboardTask `json:"dashboard_task,omitempty"`
 	// The task runs one or more dbt commands when the `dbt_task` field is
 	// present. The dbt task requires both Databricks SQL and the ability to use
 	// a serverless or a pro SQL warehouse.
@@ -4482,7 +4720,7 @@ type Task struct {
 	// The task executes a nested task for every input provided when the
 	// `for_each_task` field is present.
 	ForEachTask *ForEachTask `json:"for_each_task,omitempty"`
-	// Next field: 9
+
 	GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"`
 	// An optional set of health rules that can be defined for this job.
 	Health *JobsHealthRules `json:"health,omitempty"`
@@ -4513,6 +4751,9 @@ type Task struct {
 	// The task triggers a pipeline update when the `pipeline_task` field is
 	// present. Only pipelines configured to use triggered mode are supported.
 	PipelineTask *PipelineTask `json:"pipeline_task,omitempty"`
+	// The task triggers a Power BI semantic model update when the
+	// `power_bi_task` field is present.
+	PowerBiTask *PowerBiTask `json:"power_bi_task,omitempty"`
 	// The task runs a Python wheel when the `python_wheel_task` field is
 	// present.
 	PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"`
@@ -5168,3 +5409,17 @@ type WebhookNotifications struct {
 	// the `on_success` property.
 	OnSuccess []Webhook `json:"on_success,omitempty"`
 }
+
+type WidgetErrorDetail struct {
+	Message string `json:"message,omitempty"`
+
+	ForceSendFields []string `json:"-" url:"-"`
+}
+
+func (s *WidgetErrorDetail) UnmarshalJSON(b []byte) error {
+	return marshal.Unmarshal(b, s)
+}
+
+func (s WidgetErrorDetail) MarshalJSON() ([]byte, error) {
+	return marshal.Marshal(s)
+}
diff --git a/service/ml/api.go b/service/ml/api.go
index 99ecc63d5..25fd7909b 100755
--- a/service/ml/api.go
+++ b/service/ml/api.go
@@ -72,6 +72,18 @@ type ExperimentsInterface interface {
 	// exists.
 	GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error)

+	// Get credentials to download trace data.
+	GetCredentialsForTraceDataDownload(ctx context.Context, request GetCredentialsForTraceDataDownloadRequest) (*GetCredentialsForTraceDataDownloadResponse, error)
+
+	// Get credentials to download trace data.
+ GetCredentialsForTraceDataDownloadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataDownloadResponse, error) + + // Get credentials to upload trace data. + GetCredentialsForTraceDataUpload(ctx context.Context, request GetCredentialsForTraceDataUploadRequest) (*GetCredentialsForTraceDataUploadResponse, error) + + // Get credentials to upload trace data. + GetCredentialsForTraceDataUploadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataUploadResponse, error) + // Get an experiment. // // Gets metadata for an experiment. This method works on deleted experiments. @@ -352,6 +364,20 @@ type ExperimentsAPI struct { experimentsImpl } +// Get credentials to download trace data. +func (a *ExperimentsAPI) GetCredentialsForTraceDataDownloadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataDownloadResponse, error) { + return a.experimentsImpl.GetCredentialsForTraceDataDownload(ctx, GetCredentialsForTraceDataDownloadRequest{ + RequestId: requestId, + }) +} + +// Get credentials to upload trace data. +func (a *ExperimentsAPI) GetCredentialsForTraceDataUploadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataUploadResponse, error) { + return a.experimentsImpl.GetCredentialsForTraceDataUpload(ctx, GetCredentialsForTraceDataUploadRequest{ + RequestId: requestId, + }) +} + // Get experiment permission levels. // // Gets the permission levels that a user can have on an object. diff --git a/service/ml/impl.go b/service/ml/impl.go index cb27bdc46..ce1d4c630 100755 --- a/service/ml/impl.go +++ b/service/ml/impl.go @@ -93,6 +93,26 @@ func (a *experimentsImpl) GetByName(ctx context.Context, request GetByNameReques return &getExperimentByNameResponse, err } +func (a *experimentsImpl) GetCredentialsForTraceDataDownload(ctx context.Context, request GetCredentialsForTraceDataDownloadRequest) (*GetCredentialsForTraceDataDownloadResponse, error) { + var getCredentialsForTraceDataDownloadResponse GetCredentialsForTraceDataDownloadResponse + path := fmt.Sprintf("/api/2.0/mlflow/traces/%v/credentials-for-data-download", request.RequestId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCredentialsForTraceDataDownloadResponse) + return &getCredentialsForTraceDataDownloadResponse, err +} + +func (a *experimentsImpl) GetCredentialsForTraceDataUpload(ctx context.Context, request GetCredentialsForTraceDataUploadRequest) (*GetCredentialsForTraceDataUploadResponse, error) { + var getCredentialsForTraceDataUploadResponse GetCredentialsForTraceDataUploadResponse + path := fmt.Sprintf("/api/2.0/mlflow/traces/%v/credentials-for-data-upload", request.RequestId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCredentialsForTraceDataUploadResponse) + return &getCredentialsForTraceDataUploadResponse, err +} + func (a *experimentsImpl) GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) { var getExperimentResponse GetExperimentResponse path := "/api/2.0/mlflow/experiments/get" diff --git a/service/ml/interface.go b/service/ml/interface.go index 2d90493d2..928e99553 100755 --- a/service/ml/interface.go +++ b/service/ml/interface.go @@ -75,6 +75,12 @@ type ExperimentsService interface 
{ // exists. GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error) + // Get credentials to download trace data. + GetCredentialsForTraceDataDownload(ctx context.Context, request GetCredentialsForTraceDataDownloadRequest) (*GetCredentialsForTraceDataDownloadResponse, error) + + // Get credentials to upload trace data. + GetCredentialsForTraceDataUpload(ctx context.Context, request GetCredentialsForTraceDataUploadRequest) (*GetCredentialsForTraceDataUploadResponse, error) + // Get an experiment. // // Gets metadata for an experiment. This method works on deleted diff --git a/service/ml/model.go b/service/ml/model.go index 5573c0279..b0e0c0491 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -207,6 +207,82 @@ type ApproveTransitionRequestResponse struct { Activity *Activity `json:"activity,omitempty"` } +type ArtifactCredentialInfo struct { + // A collection of HTTP headers that should be specified when uploading to + // or downloading from the specified `signed_uri`. + Headers []ArtifactCredentialInfoHttpHeader `json:"headers,omitempty"` + // The path, relative to the Run's artifact root location, of the artifact + // that can be accessed with the credential. + Path string `json:"path,omitempty"` + // The ID of the MLflow Run containing the artifact that can be accessed + // with the credential. + RunId string `json:"run_id,omitempty"` + // The signed URI credential that provides access to the artifact. + SignedUri string `json:"signed_uri,omitempty"` + // The type of the signed credential URI (e.g., an AWS presigned URL or an + // Azure Shared Access Signature URI). + Type ArtifactCredentialType `json:"type,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ArtifactCredentialInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ArtifactCredentialInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ArtifactCredentialInfoHttpHeader struct { + // The HTTP header name. + Name string `json:"name,omitempty"` + // The HTTP header value. 
+ Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ArtifactCredentialInfoHttpHeader) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ArtifactCredentialInfoHttpHeader) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of a given artifact access credential +type ArtifactCredentialType string + +const ArtifactCredentialTypeAwsPresignedUrl ArtifactCredentialType = `AWS_PRESIGNED_URL` + +const ArtifactCredentialTypeAzureAdlsGen2SasUri ArtifactCredentialType = `AZURE_ADLS_GEN2_SAS_URI` + +const ArtifactCredentialTypeAzureSasUri ArtifactCredentialType = `AZURE_SAS_URI` + +const ArtifactCredentialTypeGcpSignedUrl ArtifactCredentialType = `GCP_SIGNED_URL` + +// String representation for [fmt.Print] +func (f *ArtifactCredentialType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ArtifactCredentialType) Set(v string) error { + switch v { + case `AWS_PRESIGNED_URL`, `AZURE_ADLS_GEN2_SAS_URI`, `AZURE_SAS_URI`, `GCP_SIGNED_URL`: + *f = ArtifactCredentialType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AWS_PRESIGNED_URL", "AZURE_ADLS_GEN2_SAS_URI", "AZURE_SAS_URI", "GCP_SIGNED_URL"`, v) + } +} + +// Type always returns ArtifactCredentialType to satisfy [pflag.Value] interface +func (f *ArtifactCredentialType) Type() string { + return "ArtifactCredentialType" +} + // An action that a user (with sufficient permissions) could take on a comment. // Valid values are: * `EDIT_COMMENT`: Edit the comment // @@ -320,56 +396,60 @@ func (s CreateExperimentResponse) MarshalJSON() ([]byte, error) { } type CreateForecastingExperimentRequest struct { - // Name of the column in the input training table used to customize the - // weight for each time series to calculate weighted metrics. + // The column in the training table used to customize weights for each time + // series. CustomWeightsColumn string `json:"custom_weights_column,omitempty"` - // The path to the created experiment. This is the path where the experiment - // will be stored in the workspace. + // The path in the workspace to store the created experiment. ExperimentPath string `json:"experiment_path,omitempty"` - // The granularity of the forecast. This defines the time interval between - // consecutive rows in the time series data. Possible values: '1 second', '1 - // minute', '5 minutes', '10 minutes', '15 minutes', '30 minutes', 'Hourly', - // 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly'. + // The time interval between consecutive rows in the time series data. + // Possible values include: '1 second', '1 minute', '5 minutes', '10 + // minutes', '15 minutes', '30 minutes', 'Hourly', 'Daily', 'Weekly', + // 'Monthly', 'Quarterly', 'Yearly'. ForecastGranularity string `json:"forecast_granularity"` - // The number of time steps into the future for which predictions should be - // made. This value represents a multiple of forecast_granularity - // determining how far ahead the model will forecast. + // The number of time steps into the future to make predictions, calculated + // as a multiple of forecast_granularity. This value represents how far + // ahead the model should forecast. ForecastHorizon int64 `json:"forecast_horizon"` - // Region code(s) to consider when automatically adding holiday features. - // When empty, no holiday features are added. Only supports 1 holiday region - // for now. 
+	// The region code(s) to automatically add holiday features. Currently
+	// supports only one region.
 	HolidayRegions []string `json:"holiday_regions,omitempty"`
-	// The maximum duration in minutes for which the experiment is allowed to
-	// run. If the experiment exceeds this time limit it will be stopped
-	// automatically.
+	// Specifies the list of feature columns to include in model training. These
+	// columns must exist in the training data and be of type string, numerical,
+	// or boolean. If not specified, no additional features will be included.
+	// Note: Certain columns are automatically handled: - Automatically
+	// excluded: split_column, target_column, custom_weights_column. -
+	// Automatically included: time_column.
+	IncludeFeatures []string `json:"include_features,omitempty"`
+	// The maximum duration for the experiment in minutes. The experiment stops
+	// automatically if it exceeds this limit.
 	MaxRuntime int64 `json:"max_runtime,omitempty"`
-	// The three-level (fully qualified) path to a unity catalog table. This
-	// table path serves to store the predictions.
+	// The fully qualified path of a Unity Catalog table, formatted as
+	// catalog_name.schema_name.table_name, used to store predictions.
 	PredictionDataPath string `json:"prediction_data_path,omitempty"`
 	// The evaluation metric used to optimize the forecasting model.
 	PrimaryMetric string `json:"primary_metric,omitempty"`
-	// The three-level (fully qualified) path to a unity catalog model. This
-	// model path serves to store the best model.
+	// The fully qualified path of a Unity Catalog model, formatted as
+	// catalog_name.schema_name.model_name, used to store the best model.
 	RegisterTo string `json:"register_to,omitempty"`
-	// Name of the column in the input training table used for custom data
-	// splits. The values in this column must be "train", "validate", or "test"
-	// to indicate which split each row belongs to.
+	// The column in the training table used for custom data splits. Values
+	// must be 'train', 'validate', or 'test'.
 	SplitColumn string `json:"split_column,omitempty"`
-	// Name of the column in the input training table that serves as the
-	// prediction target. The values in this column will be used as the ground
-	// truth for model training.
+	// The column in the input training table used as the prediction target for
+	// model training. The values in this column are used as the ground truth
+	// for model training.
 	TargetColumn string `json:"target_column"`
-	// Name of the column in the input training table that represents the
-	// timestamp of each row.
+	// The column in the input training table that represents each row's
+	// timestamp.
 	TimeColumn string `json:"time_column"`
-	// Name of the column in the input training table used to group the dataset
-	// to predict individual time series
+	// The column in the training table used to group the dataset for predicting
+	// individual time series.
 	TimeseriesIdentifierColumns []string `json:"timeseries_identifier_columns,omitempty"`
-	// The three-level (fully qualified) name of a unity catalog table. This
-	// table serves as the training data for the forecasting model.
+	// The fully qualified name of a Unity Catalog table, formatted as
+	// catalog_name.schema_name.table_name, used as training data for the
+	// forecasting model.
 	TrainDataPath string `json:"train_data_path"`
-	// The list of frameworks to include for model tuning. Possible values:
-	// 'Prophet', 'ARIMA', 'DeepAR'.
An empty list will include all supported + // List of frameworks to include for model tuning. Possible values are + // 'Prophet', 'ARIMA', 'DeepAR'. An empty list includes all supported // frameworks. TrainingFrameworks []string `json:"training_frameworks,omitempty"` @@ -1015,11 +1095,11 @@ func (s ExperimentTag) MarshalJSON() ([]byte, error) { // Metadata of a single artifact file or directory. type FileInfo struct { - // Size in bytes. Unset for directories. + // The size in bytes of the file. Unset for directories. FileSize int64 `json:"file_size,omitempty"` // Whether the path is a directory. IsDir bool `json:"is_dir,omitempty"` - // Path relative to the root artifact directory run. + // The path relative to the root artifact directory run. Path string `json:"path,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1093,6 +1173,28 @@ type GetByNameRequest struct { ExperimentName string `json:"-" url:"experiment_name"` } +// Get credentials to download trace data +type GetCredentialsForTraceDataDownloadRequest struct { + // The ID of the trace to fetch artifact download credentials for. + RequestId string `json:"-" url:"-"` +} + +type GetCredentialsForTraceDataDownloadResponse struct { + // The artifact download credentials for the specified trace data. + CredentialInfo *ArtifactCredentialInfo `json:"credential_info,omitempty"` +} + +// Get credentials to upload trace data +type GetCredentialsForTraceDataUploadRequest struct { + // The ID of the trace to fetch artifact upload credentials for. + RequestId string `json:"-" url:"-"` +} + +type GetCredentialsForTraceDataUploadResponse struct { + // The artifact upload credentials for the specified trace data. + CredentialInfo *ArtifactCredentialInfo `json:"credential_info,omitempty"` +} + type GetExperimentByNameResponse struct { // Experiment details. Experiment *Experiment `json:"experiment,omitempty"` @@ -1387,8 +1489,8 @@ func (s JobSpecWithoutSecret) MarshalJSON() ([]byte, error) { // List artifacts type ListArtifactsRequest struct { - // Token indicating the page of artifact results to fetch. `page_token` is - // not supported when listing artifacts in UC Volumes. A maximum of 1000 + // The token indicating the page of artifact results to fetch. `page_token` + // is not supported when listing artifacts in UC Volumes. A maximum of 1000 // artifacts will be retrieved for UC Volumes. Please call // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files @@ -1415,11 +1517,11 @@ func (s ListArtifactsRequest) MarshalJSON() ([]byte, error) { } type ListArtifactsResponse struct { - // File location and metadata for artifacts. + // The file location and metadata for artifacts. Files []FileInfo `json:"files,omitempty"` - // Token that can be used to retrieve the next page of artifact results + // The token that can be used to retrieve the next page of artifact results. NextPageToken string `json:"next_page_token,omitempty"` - // Root artifact directory for the run. + // The root artifact directory for the run. 
RootUri string `json:"root_uri,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1595,6 +1697,8 @@ type LogBatchResponse struct { type LogInputs struct { // Dataset inputs Datasets []DatasetInput `json:"datasets,omitempty"` + // Model inputs + Models []ModelInput `json:"models,omitempty"` // ID of the run to log under RunId string `json:"run_id"` } @@ -1603,8 +1707,17 @@ type LogInputsResponse struct { } type LogMetric struct { + // Dataset digest of the dataset associated with the metric, e.g. an md5 + // hash of the dataset that uniquely identifies it within datasets of the + // same name. + DatasetDigest string `json:"dataset_digest,omitempty"` + // The name of the dataset associated with the metric. E.g. + // “my.uc.table@2” “nyc-taxi-dataset”, “fantastic-elk-3” + DatasetName string `json:"dataset_name,omitempty"` // Name of the metric. Key string `json:"key"` + // ID of the logged model associated with the metric, if applicable + ModelId string `json:"model_id,omitempty"` // ID of the run under which to log the metric. Must be provided. RunId string `json:"run_id,omitempty"` // [Deprecated, use `run_id` instead] ID of the run under which to log the @@ -1678,13 +1791,25 @@ type LogParamResponse struct { // Metric associated with a run, represented as a key-value pair. type Metric struct { - // Key identifying this metric. + // The dataset digest of the dataset associated with the metric, e.g. an md5 + // hash of the dataset that uniquely identifies it within datasets of the + // same name. + DatasetDigest string `json:"dataset_digest,omitempty"` + // The name of the dataset associated with the metric. E.g. + // “my.uc.table@2” “nyc-taxi-dataset”, “fantastic-elk-3” + DatasetName string `json:"dataset_name,omitempty"` + // The key identifying the metric. Key string `json:"key,omitempty"` - // Step at which to log the metric. + // The ID of the logged model or registered model version associated with + // the metric, if applicable. + ModelId string `json:"model_id,omitempty"` + // The ID of the run containing the metric. + RunId string `json:"run_id,omitempty"` + // The step at which the metric was logged. Step int64 `json:"step,omitempty"` - // The timestamp at which this metric was recorded. + // The timestamp at which the metric was recorded. Timestamp int64 `json:"timestamp,omitempty"` - // Value associated with this metric. + // The value of the metric. Value float64 `json:"value,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1759,6 +1884,12 @@ func (s ModelDatabricks) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Represents a LoggedModel or Registered Model Version input to a Run. +type ModelInput struct { + // The unique identifier of the model. + ModelId string `json:"model_id"` +} + type ModelTag struct { // The tag key. Key string `json:"key,omitempty"` @@ -2479,6 +2610,11 @@ func (f *RunInfoStatus) Type() string { type RunInputs struct { // Run metrics. DatasetInputs []DatasetInput `json:"dataset_inputs,omitempty"` + // **NOTE**: Experimental: This API field may change or be removed in a + // future release without warning. + // + // Model inputs to the Run. + ModelInputs []ModelInput `json:"model_inputs,omitempty"` } // Tag for a run. 
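To show how the new trace-data credential methods compose with `ArtifactCredentialInfo`, here is a minimal sketch (illustrative, not part of this change) that fetches download credentials and issues an HTTP GET against the signed URI; the trace request ID is a hypothetical placeholder:

```go
package main

import (
	"context"
	"log"
	"net/http"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	// Fetch download credentials for a trace; the ID below is hypothetical.
	resp, err := w.Experiments.GetCredentialsForTraceDataDownloadByRequestId(ctx, "tr-1234567890abcdef")
	if err != nil {
		log.Fatal(err)
	}
	info := resp.CredentialInfo
	if info == nil {
		log.Fatal("no credential info returned")
	}

	// The signed URI is pre-authorized; attach any headers the credential
	// requires (e.g. for Azure SAS-based access).
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, info.SignedUri, nil)
	if err != nil {
		log.Fatal(err)
	}
	for _, h := range info.Headers {
		req.Header.Set(h.Name, h.Value)
	}
	res, err := http.DefaultClient.Do(req)
	if err != nil {
		log.Fatal(err)
	}
	defer res.Body.Close()
	log.Printf("credential type %s, download status %s", info.Type, res.Status)
}
```

The upload variant is symmetric: `GetCredentialsForTraceDataUploadByRequestId` returns the same `ArtifactCredentialInfo` shape, used with a PUT or POST to the signed URI.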
diff --git a/service/pipelines/api.go b/service/pipelines/api.go index be5ff8121..68651fed6 100755 --- a/service/pipelines/api.go +++ b/service/pipelines/api.go @@ -16,10 +16,6 @@ import ( type PipelinesInterface interface { - // WaitGetPipelineRunning repeatedly calls [PipelinesAPI.Get] and waits to reach RUNNING state - WaitGetPipelineRunning(ctx context.Context, pipelineId string, - timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) - // WaitGetPipelineIdle repeatedly calls [PipelinesAPI.Get] and waits to reach IDLE state WaitGetPipelineIdle(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) @@ -205,60 +201,6 @@ type PipelinesAPI struct { pipelinesImpl } -// WaitGetPipelineRunning repeatedly calls [PipelinesAPI.Get] and waits to reach RUNNING state -func (a *PipelinesAPI) WaitGetPipelineRunning(ctx context.Context, pipelineId string, - timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "long-running") - return retries.Poll[GetPipelineResponse](ctx, timeout, func() (*GetPipelineResponse, *retries.Err) { - getPipelineResponse, err := a.Get(ctx, GetPipelineRequest{ - PipelineId: pipelineId, - }) - if err != nil { - return nil, retries.Halt(err) - } - if callback != nil { - callback(getPipelineResponse) - } - status := getPipelineResponse.State - statusMessage := getPipelineResponse.Cause - switch status { - case PipelineStateRunning: // target state - return getPipelineResponse, nil - case PipelineStateFailed: - err := fmt.Errorf("failed to reach %s, got %s: %s", - PipelineStateRunning, status, statusMessage) - return nil, retries.Halt(err) - default: - return nil, retries.Continues(statusMessage) - } - }) -} - -// WaitGetPipelineRunning is a wrapper that calls [PipelinesAPI.WaitGetPipelineRunning] and waits to reach RUNNING state. -type WaitGetPipelineRunning[R any] struct { - Response *R - PipelineId string `json:"pipeline_id"` - Poll func(time.Duration, func(*GetPipelineResponse)) (*GetPipelineResponse, error) - callback func(*GetPipelineResponse) - timeout time.Duration -} - -// OnProgress invokes a callback every time it polls for the status update. -func (w *WaitGetPipelineRunning[R]) OnProgress(callback func(*GetPipelineResponse)) *WaitGetPipelineRunning[R] { - w.callback = callback - return w -} - -// Get the GetPipelineResponse with the default timeout of 20 minutes. -func (w *WaitGetPipelineRunning[R]) Get() (*GetPipelineResponse, error) { - return w.Poll(w.timeout, w.callback) -} - -// Get the GetPipelineResponse with custom timeout. -func (w *WaitGetPipelineRunning[R]) GetWithTimeout(timeout time.Duration) (*GetPipelineResponse, error) { - return w.Poll(timeout, w.callback) -} - // WaitGetPipelineIdle repeatedly calls [PipelinesAPI.Get] and waits to reach IDLE state func (a *PipelinesAPI) WaitGetPipelineIdle(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) { diff --git a/service/pkg.go b/service/pkg.go index 052bf33ae..95dbd2fba 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -52,10 +52,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. 
-//
 // - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace.
 //
+// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant.
+//
 // - [settings.CredentialsManagerAPI]: Credentials manager interacts with Identity Providers to perform token exchanges using stored credentials and refresh tokens.
 //
 // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces.
@@ -82,8 +82,14 @@
 //
 // - [settings.DisableLegacyFeaturesAPI]: Disable legacy features for new Databricks workspaces.
 //
+// - [settings.EnableExportNotebookAPI]: Controls whether users can export notebooks and files from the Workspace.
+//
 // - [settings.EnableIpAccessListsAPI]: Controls the enforcement of IP access lists for accessing the account console.
 //
+// - [settings.EnableNotebookTableClipboardAPI]: Controls whether users can copy tabular data to the clipboard via the UI.
+//
+// - [settings.EnableResultsDownloadingAPI]: Controls whether users can download notebook results.
+//
 // - [provisioning.EncryptionKeysAPI]: These APIs manage encryption key configurations for this workspace (optional).
 //
 // - [settings.EnhancedSecurityMonitoringAPI]: Controls whether enhanced security monitoring is enabled for the current workspace.
@@ -339,8 +345,8 @@ var (
 	_ *marketplace.ConsumerListingsAPI = nil
 	_ *marketplace.ConsumerPersonalizationRequestsAPI = nil
 	_ *marketplace.ConsumerProvidersAPI = nil
-	_ *catalog.CredentialsAPI = nil
 	_ *provisioning.CredentialsAPI = nil
+	_ *catalog.CredentialsAPI = nil
 	_ *settings.CredentialsManagerAPI = nil
 	_ *settings.CspEnablementAccountAPI = nil
 	_ *iam.CurrentUserAPI = nil
@@ -354,7 +360,10 @@ var (
 	_ *settings.DisableLegacyAccessAPI = nil
 	_ *settings.DisableLegacyDbfsAPI = nil
 	_ *settings.DisableLegacyFeaturesAPI = nil
+	_ *settings.EnableExportNotebookAPI = nil
 	_ *settings.EnableIpAccessListsAPI = nil
+	_ *settings.EnableNotebookTableClipboardAPI = nil
+	_ *settings.EnableResultsDownloadingAPI = nil
 	_ *provisioning.EncryptionKeysAPI = nil
 	_ *settings.EnhancedSecurityMonitoringAPI = nil
 	_ *settings.EsmEnablementAccountAPI = nil
diff --git a/service/serving/api.go b/service/serving/api.go
index 3d5a3a133..c5e91522c 100755
--- a/service/serving/api.go
+++ b/service/serving/api.go
@@ -137,15 +137,14 @@ type ServingEndpointsInterface interface {

 	// Update rate limits of a serving endpoint.
 	//
-	// Used to update the rate limits of a serving endpoint. NOTE: Only foundation
-	// model endpoints are currently supported. For external models, use AI Gateway
-	// to manage rate limits.
+	// Deprecated: Please use AI Gateway to manage rate limits instead.
 	Put(ctx context.Context, request PutRequest) (*PutResponse, error)

 	// Update AI Gateway of a serving endpoint.
 	//
-	// Used to update the AI Gateway of a serving endpoint. NOTE: Only external
-	// model and provisioned throughput endpoints are currently supported.
+	// Used to update the AI Gateway of a serving endpoint. NOTE: External model,
+	// provisioned throughput, and pay-per-token endpoints are fully supported;
+	// agent endpoints currently only support inference tables.
 	PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error)

 	// Query a serving endpoint.
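Given the clarified support matrix, an AI Gateway update on an agent endpoint would carry only inference-table settings. A hedged sketch follows (illustrative, not part of this change); the endpoint name and Unity Catalog locations are hypothetical, and the `AiGatewayInferenceTableConfig` field names are assumed from the serving package rather than defined in this diff:

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/serving"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}

	// Enable inference tables on an agent endpoint; per the note above, this
	// is currently the only AI Gateway feature agent endpoints support.
	_, err = w.ServingEndpoints.PutAiGateway(ctx, serving.PutAiGatewayRequest{
		Name: "my-agent-endpoint", // hypothetical endpoint name
		InferenceTableConfig: &serving.AiGatewayInferenceTableConfig{
			Enabled:     true,
			CatalogName: "main",       // hypothetical Unity Catalog catalog
			SchemaName:  "agent_logs", // hypothetical Unity Catalog schema
		},
	})
	if err != nil {
		log.Fatal(err)
	}
	log.Println("AI Gateway updated")
}
```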
diff --git a/service/serving/interface.go b/service/serving/interface.go index ae4452c3f..0cf50e923 100755 --- a/service/serving/interface.go +++ b/service/serving/interface.go @@ -83,15 +83,14 @@ type ServingEndpointsService interface { // Update rate limits of a serving endpoint. // - // Used to update the rate limits of a serving endpoint. NOTE: Only - // foundation model endpoints are currently supported. For external models, - // use AI Gateway to manage rate limits. + // Deprecated: Please use AI Gateway to manage rate limits instead. Put(ctx context.Context, request PutRequest) (*PutResponse, error) // Update AI Gateway of a serving endpoint. // - // Used to update the AI Gateway of a serving endpoint. NOTE: Only external - // model and provisioned throughput endpoints are currently supported. + // Used to update the AI Gateway of a serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) // Query a serving endpoint. diff --git a/service/serving/model.go b/service/serving/model.go index a52550167..502c8698c 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -506,9 +506,9 @@ func (s CohereConfig) MarshalJSON() ([]byte, error) { } type CreateServingEndpoint struct { - // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model and provisioned throughput endpoints are currently - // supported. + // The AI Gateway configuration for the serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The budget policy to be applied to the serving endpoint. BudgetPolicyId string `json:"budget_policy_id,omitempty"` @@ -1936,9 +1936,9 @@ type ServerLogsResponse struct { } type ServingEndpoint struct { - // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model and provisioned throughput endpoints are currently - // supported. + // The AI Gateway configuration for the serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The budget policy associated with the endpoint. BudgetPolicyId string `json:"budget_policy_id,omitempty"` @@ -2018,9 +2018,9 @@ func (s ServingEndpointAccessControlResponse) MarshalJSON() ([]byte, error) { } type ServingEndpointDetailed struct { - // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model and provisioned throughput endpoints are currently - // supported. + // The AI Gateway configuration for the serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The budget policy associated with the endpoint. BudgetPolicyId string `json:"budget_policy_id,omitempty"` diff --git a/service/settings/api.go b/service/settings/api.go index 039558db5..efbd46945 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
-// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enable Ip Access Lists, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. +// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enable Export Notebook, Enable Ip Access Lists, Enable Notebook Table Clipboard, Enable Results Downloading, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. package settings import ( @@ -626,11 +626,10 @@ func NewDisableLegacyAccess(client *client.DatabricksClient) *DisableLegacyAcces // 'Disabling legacy access' has the following impacts: // -// 1. Disables direct access to the Hive Metastore. However, you can still -// access Hive Metastore through HMS Federation. 2. Disables Fallback Mode (docs -// link) on any External Location access from the workspace. 3. Alters DBFS path -// access to use External Location permissions in place of legacy credentials. -// 4. Enforces Unity Catalog access on all path based access. +// 1. Disables direct access to Hive Metastores from the workspace. However, you +// can still access a Hive Metastore through Hive Metastore federation. 2. +// Disables fallback mode on external location access from the workspace. 3. +// Disables Databricks Runtime versions prior to 13.3LTS. type DisableLegacyAccessAPI struct { disableLegacyAccessImpl } @@ -705,6 +704,35 @@ type DisableLegacyFeaturesAPI struct { disableLegacyFeaturesImpl } +type EnableExportNotebookInterface interface { + + // Get the Enable Export Notebook setting. + // + // Gets the Enable Export Notebook setting. + GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) + + // Update the Enable Export Notebook setting. + // + // Updates the Enable Export Notebook setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. + PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) +} + +func NewEnableExportNotebook(client *client.DatabricksClient) *EnableExportNotebookAPI { + return &EnableExportNotebookAPI{ + enableExportNotebookImpl: enableExportNotebookImpl{ + client: client, + }, + } +} + +// Controls whether users can export notebooks and files from the Workspace. By +// default, this setting is enabled. +type EnableExportNotebookAPI struct { + enableExportNotebookImpl +} + type EnableIpAccessListsInterface interface { // Delete the account IP access toggle setting. 
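The three new toggles share the same read path through `w.Settings`. A minimal sketch for the export-notebook setting, assuming `BooleanMessage` exposes the effective value in a `Value` field:

```go
package main

import (
	"context"
	"fmt"
	"log"

	"github.com/databricks/databricks-sdk-go"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}
	// Read the workspace-level "Enable Export Notebook" toggle.
	setting, err := w.Settings.EnableExportNotebook().GetEnableExportNotebook(ctx)
	if err != nil {
		log.Fatal(err)
	}
	if setting.BooleanVal != nil {
		fmt.Println("notebook export enabled:", setting.BooleanVal.Value)
	}
}
```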
@@ -738,6 +766,64 @@ type EnableIpAccessListsAPI struct { enableIpAccessListsImpl } +type EnableNotebookTableClipboardInterface interface { + + // Get the Enable Notebook Table Clipboard setting. + // + // Gets the Enable Notebook Table Clipboard setting. + GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) + + // Update the Enable Notebook Table Clipboard setting. + // + // Updates the Enable Notebook Table Clipboard setting. The model follows + // eventual consistency, which means the get after the update operation might + // receive stale values for some time. + PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) +} + +func NewEnableNotebookTableClipboard(client *client.DatabricksClient) *EnableNotebookTableClipboardAPI { + return &EnableNotebookTableClipboardAPI{ + enableNotebookTableClipboardImpl: enableNotebookTableClipboardImpl{ + client: client, + }, + } +} + +// Controls whether users can copy tabular data to the clipboard via the UI. By +// default, this setting is enabled. +type EnableNotebookTableClipboardAPI struct { + enableNotebookTableClipboardImpl +} + +type EnableResultsDownloadingInterface interface { + + // Get the Enable Results Downloading setting. + // + // Gets the Enable Results Downloading setting. + GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) + + // Update the Enable Results Downloading setting. + // + // Updates the Enable Results Downloading setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. + PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) +} + +func NewEnableResultsDownloading(client *client.DatabricksClient) *EnableResultsDownloadingAPI { + return &EnableResultsDownloadingAPI{ + enableResultsDownloadingImpl: enableResultsDownloadingImpl{ + client: client, + }, + } +} + +// Controls whether users can download notebook results. By default, this +// setting is enabled. +type EnableResultsDownloadingAPI struct { + enableResultsDownloadingImpl +} + type EnhancedSecurityMonitoringInterface interface { // Get the enhanced security monitoring setting. @@ -1403,11 +1489,10 @@ type SettingsInterface interface { // 'Disabling legacy access' has the following impacts: // - // 1. Disables direct access to the Hive Metastore. However, you can still - // access Hive Metastore through HMS Federation. 2. Disables Fallback Mode - // (docs link) on any External Location access from the workspace. 3. Alters - // DBFS path access to use External Location permissions in place of legacy - // credentials. 4. Enforces Unity Catalog access on all path based access. + // 1. Disables direct access to Hive Metastores from the workspace. However, + // you can still access a Hive Metastore through Hive Metastore federation. + // 2. Disables fallback mode on external location access from the workspace. + // 3. Disables Databricks Runtime versions prior to 13.3LTS. DisableLegacyAccess() DisableLegacyAccessInterface // When this setting is on, access to DBFS root and DBFS mounts is @@ -1415,6 +1500,18 @@ type SettingsInterface interface { // all DBFS functionality is enabled DisableLegacyDbfs() DisableLegacyDbfsInterface + // Controls whether users can export notebooks and files from the Workspace. + // By default, this setting is enabled. 
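Updates go through the matching `UpdateEnable*Request`. A sketch that turns results downloading off; the exact field-mask path and the `ForceSendFields` workaround for serializing an explicit `false` are assumptions on our part:

```go
package main

import (
	"context"
	"log"

	"github.com/databricks/databricks-sdk-go"
	"github.com/databricks/databricks-sdk-go/service/settings"
)

func main() {
	ctx := context.Background()
	w, err := databricks.NewWorkspaceClient()
	if err != nil {
		log.Fatal(err)
	}
	_, err = w.Settings.EnableResultsDownloading().PatchEnableResultsDownloading(ctx,
		settings.UpdateEnableResultsDownloadingRequest{
			// The generated docs say this should always be true for
			// the Settings API.
			AllowMissing: true,
			// Explicit mask instead of `*`, per the field-mask guidance;
			// the path string is our assumption.
			FieldMask: "boolean_val.value",
			Setting: settings.EnableResultsDownloading{
				BooleanVal: &settings.BooleanMessage{
					Value: false,
					// Assumed workaround: push the zero value through
					// json omitempty so "false" is actually sent.
					ForceSendFields: []string{"Value"},
				},
			},
		})
	if err != nil {
		log.Fatal(err)
	}
}
```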
+ EnableExportNotebook() EnableExportNotebookInterface + + // Controls whether users can copy tabular data to the clipboard via the UI. + // By default, this setting is enabled. + EnableNotebookTableClipboard() EnableNotebookTableClipboardInterface + + // Controls whether users can download notebook results. By default, this + // setting is enabled. + EnableResultsDownloading() EnableResultsDownloadingInterface + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. However, if the @@ -1460,6 +1557,12 @@ func NewSettings(client *client.DatabricksClient) *SettingsAPI { disableLegacyDbfs: NewDisableLegacyDbfs(client), + enableExportNotebook: NewEnableExportNotebook(client), + + enableNotebookTableClipboard: NewEnableNotebookTableClipboard(client), + + enableResultsDownloading: NewEnableResultsDownloading(client), + enhancedSecurityMonitoring: NewEnhancedSecurityMonitoring(client), restrictWorkspaceAdmins: NewRestrictWorkspaceAdmins(client), @@ -1509,11 +1612,10 @@ type SettingsAPI struct { // 'Disabling legacy access' has the following impacts: // - // 1. Disables direct access to the Hive Metastore. However, you can still - // access Hive Metastore through HMS Federation. 2. Disables Fallback Mode - // (docs link) on any External Location access from the workspace. 3. Alters - // DBFS path access to use External Location permissions in place of legacy - // credentials. 4. Enforces Unity Catalog access on all path based access. + // 1. Disables direct access to Hive Metastores from the workspace. However, + // you can still access a Hive Metastore through Hive Metastore federation. + // 2. Disables fallback mode on external location access from the workspace. + // 3. Disables Databricks Runtime versions prior to 13.3LTS. disableLegacyAccess DisableLegacyAccessInterface // When this setting is on, access to DBFS root and DBFS mounts is @@ -1521,6 +1623,18 @@ type SettingsAPI struct { // all DBFS functionality is enabled disableLegacyDbfs DisableLegacyDbfsInterface + // Controls whether users can export notebooks and files from the Workspace. + // By default, this setting is enabled. + enableExportNotebook EnableExportNotebookInterface + + // Controls whether users can copy tabular data to the clipboard via the UI. + // By default, this setting is enabled. + enableNotebookTableClipboard EnableNotebookTableClipboardInterface + + // Controls whether users can download notebook results. By default, this + // setting is enabled. + enableResultsDownloading EnableResultsDownloadingInterface + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. 
However, if the @@ -1574,6 +1688,18 @@ func (a *SettingsAPI) DisableLegacyDbfs() DisableLegacyDbfsInterface { return a.disableLegacyDbfs } +func (a *SettingsAPI) EnableExportNotebook() EnableExportNotebookInterface { + return a.enableExportNotebook +} + +func (a *SettingsAPI) EnableNotebookTableClipboard() EnableNotebookTableClipboardInterface { + return a.enableNotebookTableClipboard +} + +func (a *SettingsAPI) EnableResultsDownloading() EnableResultsDownloadingInterface { + return a.enableResultsDownloading +} + func (a *SettingsAPI) EnhancedSecurityMonitoring() EnhancedSecurityMonitoringInterface { return a.enhancedSecurityMonitoring } diff --git a/service/settings/impl.go b/service/settings/impl.go index ec11fe6ac..7590bdf0d 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -422,6 +422,32 @@ func (a *disableLegacyFeaturesImpl) Update(ctx context.Context, request UpdateDi return &disableLegacyFeatures, err } +// unexported type that holds implementations of just EnableExportNotebook API methods +type enableExportNotebookImpl struct { + client *client.DatabricksClient +} + +func (a *enableExportNotebookImpl) GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) { + var enableExportNotebook EnableExportNotebook + path := "/api/2.0/settings/types/enable-export-notebook/names/default" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &enableExportNotebook) + return &enableExportNotebook, err +} + +func (a *enableExportNotebookImpl) PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) { + var enableExportNotebook EnableExportNotebook + path := "/api/2.0/settings/types/enable-export-notebook/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &enableExportNotebook) + return &enableExportNotebook, err +} + // unexported type that holds implementations of just EnableIpAccessLists API methods type enableIpAccessListsImpl struct { client *client.DatabricksClient @@ -458,6 +484,58 @@ func (a *enableIpAccessListsImpl) Update(ctx context.Context, request UpdateAcco return &accountIpAccessEnable, err } +// unexported type that holds implementations of just EnableNotebookTableClipboard API methods +type enableNotebookTableClipboardImpl struct { + client *client.DatabricksClient +} + +func (a *enableNotebookTableClipboardImpl) GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) { + var enableNotebookTableClipboard EnableNotebookTableClipboard + path := "/api/2.0/settings/types/enable-notebook-table-clipboard/names/default" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &enableNotebookTableClipboard) + return &enableNotebookTableClipboard, err +} + +func (a *enableNotebookTableClipboardImpl) PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) { + var enableNotebookTableClipboard EnableNotebookTableClipboard + path := "/api/2.0/settings/types/enable-notebook-table-clipboard/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + 
headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &enableNotebookTableClipboard) + return &enableNotebookTableClipboard, err +} + +// unexported type that holds implementations of just EnableResultsDownloading API methods +type enableResultsDownloadingImpl struct { + client *client.DatabricksClient +} + +func (a *enableResultsDownloadingImpl) GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) { + var enableResultsDownloading EnableResultsDownloading + path := "/api/2.0/settings/types/enable-results-downloading/names/default" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &enableResultsDownloading) + return &enableResultsDownloading, err +} + +func (a *enableResultsDownloadingImpl) PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) { + var enableResultsDownloading EnableResultsDownloading + path := "/api/2.0/settings/types/enable-results-downloading/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &enableResultsDownloading) + return &enableResultsDownloading, err +} + // unexported type that holds implementations of just EnhancedSecurityMonitoring API methods type enhancedSecurityMonitoringImpl struct { client *client.DatabricksClient diff --git a/service/settings/interface.go b/service/settings/interface.go index cb38b4641..f5faae367 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -275,11 +275,10 @@ type DefaultNamespaceService interface { // 'Disabling legacy access' has the following impacts: // -// 1. Disables direct access to the Hive Metastore. However, you can still -// access Hive Metastore through HMS Federation. 2. Disables Fallback Mode (docs -// link) on any External Location access from the workspace. 3. Alters DBFS path -// access to use External Location permissions in place of legacy credentials. -// 4. Enforces Unity Catalog access on all path based access. +// 1. Disables direct access to Hive Metastores from the workspace. However, you +// can still access a Hive Metastore through Hive Metastore federation. 2. +// Disables fallback mode on external location access from the workspace. 3. +// Disables Databricks Runtime versions prior to 13.3LTS. type DisableLegacyAccessService interface { // Delete Legacy Access Disablement Status. @@ -344,6 +343,23 @@ type DisableLegacyFeaturesService interface { Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) } +// Controls whether users can export notebooks and files from the Workspace. By +// default, this setting is enabled. +type EnableExportNotebookService interface { + + // Get the Enable Export Notebook setting. + // + // Gets the Enable Export Notebook setting. + GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) + + // Update the Enable Export Notebook setting. + // + // Updates the Enable Export Notebook setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. 
+ PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) +} + // Controls the enforcement of IP access lists for accessing the account // console. Allowing you to enable or disable restricted access based on IP // addresses. @@ -365,6 +381,40 @@ type EnableIpAccessListsService interface { Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) } +// Controls whether users can copy tabular data to the clipboard via the UI. By +// default, this setting is enabled. +type EnableNotebookTableClipboardService interface { + + // Get the Enable Notebook Table Clipboard setting. + // + // Gets the Enable Notebook Table Clipboard setting. + GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) + + // Update the Enable Notebook Table Clipboard setting. + // + // Updates the Enable Notebook Table Clipboard setting. The model follows + // eventual consistency, which means the get after the update operation + // might receive stale values for some time. + PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) +} + +// Controls whether users can download notebook results. By default, this +// setting is enabled. +type EnableResultsDownloadingService interface { + + // Get the Enable Results Downloading setting. + // + // Gets the Enable Results Downloading setting. + GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) + + // Update the Enable Results Downloading setting. + // + // Updates the Enable Results Downloading setting. The model follows + // eventual consistency, which means the get after the update operation + // might receive stale values for some time. + PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) +} + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. However, if the compliance diff --git a/service/settings/model.go b/service/settings/model.go index f05607616..39daaf756 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -1422,6 +1422,66 @@ type EmailConfig struct { type Empty struct { } +type EnableExportNotebook struct { + BooleanVal *BooleanMessage `json:"boolean_val,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *EnableExportNotebook) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnableExportNotebook) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnableNotebookTableClipboard struct { + BooleanVal *BooleanMessage `json:"boolean_val,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. 
+ // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *EnableNotebookTableClipboard) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnableNotebookTableClipboard) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnableResultsDownloading struct { + BooleanVal *BooleanMessage `json:"boolean_val,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *EnableResultsDownloading) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnableResultsDownloading) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // SHIELD feature: ESM type EnhancedSecurityMonitoring struct { IsEnabled bool `json:"is_enabled,omitempty"` @@ -3110,6 +3170,69 @@ type UpdateDisableLegacyFeaturesRequest struct { Setting DisableLegacyFeatures `json:"setting"` } +// Details required to update a setting. +type UpdateEnableExportNotebookRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EnableExportNotebook `json:"setting"` +} + +// Details required to update a setting. +type UpdateEnableNotebookTableClipboardRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EnableNotebookTableClipboard `json:"setting"` +} + +// Details required to update a setting. +type UpdateEnableResultsDownloadingRequest struct { + // This should always be set to true for Settings API. 
Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EnableResultsDownloading `json:"setting"` +} + // Details required to update a setting. type UpdateEnhancedSecurityMonitoringSettingRequest struct { // This should always be set to true for Settings API. Added for AIP diff --git a/service/sql/model.go b/service/sql/model.go index 3d6336bf0..a74f2de93 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -3163,6 +3163,11 @@ type QueryFilter struct { type QueryInfo struct { // SQL Warehouse channel information at the time of query execution ChannelUsed *ChannelInfo `json:"channel_used,omitempty"` + // Client application that ran the statement. For example: Databricks SQL + // Editor, Tableau, and Power BI. This field is derived from information + // provided by client applications. While values are expected to remain + // static over time, this cannot be guaranteed. + ClientApplication string `json:"client_application,omitempty"` // Total execution time of the statement ( excluding result fetch time ). Duration int64 `json:"duration,omitempty"` // Alias for `warehouse_id`. @@ -4682,6 +4687,8 @@ const WarehousePermissionLevelCanMonitor WarehousePermissionLevel = `CAN_MONITOR const WarehousePermissionLevelCanUse WarehousePermissionLevel = `CAN_USE` +const WarehousePermissionLevelCanView WarehousePermissionLevel = `CAN_VIEW` + const WarehousePermissionLevelIsOwner WarehousePermissionLevel = `IS_OWNER` // String representation for [fmt.Print] @@ -4692,11 +4699,11 @@ func (f *WarehousePermissionLevel) String() string { // Set raw string value and validate it against allowed values func (f *WarehousePermissionLevel) Set(v string) error { switch v { - case `CAN_MANAGE`, `CAN_MONITOR`, `CAN_USE`, `IS_OWNER`: + case `CAN_MANAGE`, `CAN_MONITOR`, `CAN_USE`, `CAN_VIEW`, `IS_OWNER`: *f = WarehousePermissionLevel(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MONITOR", "CAN_USE", "IS_OWNER"`, v) + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MONITOR", "CAN_USE", "CAN_VIEW", "IS_OWNER"`, v) } }
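The new warehouse permission level round-trips through the validating setter, which is the quickest way to confirm it parses. Grounded entirely in the `Set`/`String` implementation above; nothing assumed:

```go
package main

import (
	"fmt"

	"github.com/databricks/databricks-sdk-go/service/sql"
)

func main() {
	var lvl sql.WarehousePermissionLevel
	// CAN_VIEW now validates alongside CAN_MANAGE, CAN_MONITOR,
	// CAN_USE and IS_OWNER.
	if err := lvl.Set("CAN_VIEW"); err != nil {
		fmt.Println("unexpected:", err)
		return
	}
	fmt.Println(lvl == sql.WarehousePermissionLevelCanView) // true
}
```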