diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index e7f752fb5..3b0b1fdac 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -ce962ccd0a078a5a9d89494fe38d237ce377d5f3 \ No newline at end of file +d4c86c045ee9d0410a41ef07e8ae708673b95fa1 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 3296c8f9f..679971617 100644 --- a/.gitattributes +++ b/.gitattributes @@ -131,6 +131,7 @@ experimental/mocks/service/sharing/mock_recipients_interface.go linguist-generat experimental/mocks/service/sharing/mock_shares_interface.go linguist-generated=true experimental/mocks/service/sql/mock_alerts_interface.go linguist-generated=true experimental/mocks/service/sql/mock_alerts_legacy_interface.go linguist-generated=true +experimental/mocks/service/sql/mock_alerts_v2_interface.go linguist-generated=true experimental/mocks/service/sql/mock_dashboard_widgets_interface.go linguist-generated=true experimental/mocks/service/sql/mock_dashboards_interface.go linguist-generated=true experimental/mocks/service/sql/mock_data_sources_interface.go linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 0fd0b1445..fc2d039d5 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -5,9 +5,27 @@ ### New Features and Improvements ### Bug Fixes +* Fixed the deserialization of responses in VectorSearchAPI's `QueryIndex()` method ([#1214](https://github.com/databricks/databricks-sdk-py/pull/1214)). ### Documentation ### Internal Changes ### API Changes +* Added `FutureFeatureDataPath` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). +* Added `ExcludeColumns` and `IncludeColumns` fields for [pipelines.TableSpecificConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpecificConfig). 
+* Added `NetworkCheckControlPlaneFailure`, `NetworkCheckDnsServerFailure`, `NetworkCheckMetadataEndpointFailure`, `NetworkCheckMultipleComponentsFailure`, `NetworkCheckNicFailure`, `NetworkCheckStorageFailure` and `SecretPermissionDenied` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* [Breaking] Changed [vectorsearch.ListValue](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValue). +* [Breaking] Changed `PipelineId` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline) to be required. +* Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. +* [Breaking] Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. +* [Breaking] Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. +* Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. 
+* Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. +* [Breaking] Changed pagination for [AlertsV2API.ListAlerts](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API.ListAlerts). +* [Breaking] Changed waiter for [GenieAPI.CreateMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI.CreateMessage). 
diff --git a/experimental/mocks/service/sql/mock_alerts_v2_interface.go b/experimental/mocks/service/sql/mock_alerts_v2_interface.go index d48e31c99..91100ef51 100644 --- a/experimental/mocks/service/sql/mock_alerts_v2_interface.go +++ b/experimental/mocks/service/sql/mock_alerts_v2_interface.go @@ -24,6 +24,65 @@ func (_m *MockAlertsV2Interface) EXPECT() *MockAlertsV2Interface_Expecter { return &MockAlertsV2Interface_Expecter{mock: &_m.Mock} } +// AlertV2DisplayNameToIdMap provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) AlertV2DisplayNameToIdMap(ctx context.Context, request sql.ListAlertsV2Request) (map[string]string, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for AlertV2DisplayNameToIdMap") + } + + var r0 map[string]string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) map[string]string); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AlertV2DisplayNameToIdMap' +type MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call struct { + *mock.Call +} + +// AlertV2DisplayNameToIdMap is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) AlertV2DisplayNameToIdMap(ctx interface{}, request interface{}) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + return &MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call{Call: 
_e.mock.On("AlertV2DisplayNameToIdMap", ctx, request)} +} + +func (_c *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call) Return(_a0 map[string]string, _a1 error) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + _c.Call.Return(run) + return _c +} + // CreateAlert provides a mock function with given fields: ctx, request func (_m *MockAlertsV2Interface) CreateAlert(ctx context.Context, request sql.CreateAlertV2Request) (*sql.AlertV2, error) { ret := _m.Called(ctx, request) @@ -202,23 +261,23 @@ func (_c *MockAlertsV2Interface_GetAlertById_Call) RunAndReturn(run func(context } // GetByDisplayName provides a mock function with given fields: ctx, name -func (_m *MockAlertsV2Interface) GetByDisplayName(ctx context.Context, name string) (*sql.ListAlertsV2ResponseAlert, error) { +func (_m *MockAlertsV2Interface) GetByDisplayName(ctx context.Context, name string) (*sql.AlertV2, error) { ret := _m.Called(ctx, name) if len(ret) == 0 { panic("no return value specified for GetByDisplayName") } - var r0 *sql.ListAlertsV2ResponseAlert + var r0 *sql.AlertV2 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (*sql.ListAlertsV2ResponseAlert, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) (*sql.AlertV2, error)); ok { return rf(ctx, name) } - if rf, ok := ret.Get(0).(func(context.Context, string) *sql.ListAlertsV2ResponseAlert); ok { + if rf, ok := 
ret.Get(0).(func(context.Context, string) *sql.AlertV2); ok { r0 = rf(ctx, name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*sql.ListAlertsV2ResponseAlert) + r0 = ret.Get(0).(*sql.AlertV2) } } @@ -250,30 +309,30 @@ func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Run(run func(ctx context. return _c } -func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Return(_a0 *sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_GetByDisplayName_Call { +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_GetByDisplayName_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockAlertsV2Interface_GetByDisplayName_Call) RunAndReturn(run func(context.Context, string) (*sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_GetByDisplayName_Call { +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) RunAndReturn(run func(context.Context, string) (*sql.AlertV2, error)) *MockAlertsV2Interface_GetByDisplayName_Call { _c.Call.Return(run) return _c } // ListAlerts provides a mock function with given fields: ctx, request -func (_m *MockAlertsV2Interface) ListAlerts(ctx context.Context, request sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert] { +func (_m *MockAlertsV2Interface) ListAlerts(ctx context.Context, request sql.ListAlertsV2Request) listing.Iterator[sql.AlertV2] { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for ListAlerts") } - var r0 listing.Iterator[sql.ListAlertsV2ResponseAlert] - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]); ok { + var r0 listing.Iterator[sql.AlertV2] + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.AlertV2]); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(listing.Iterator[sql.ListAlertsV2ResponseAlert]) + r0 = 
ret.Get(0).(listing.Iterator[sql.AlertV2]) } } @@ -299,34 +358,34 @@ func (_c *MockAlertsV2Interface_ListAlerts_Call) Run(run func(ctx context.Contex return _c } -func (_c *MockAlertsV2Interface_ListAlerts_Call) Return(_a0 listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { +func (_c *MockAlertsV2Interface_ListAlerts_Call) Return(_a0 listing.Iterator[sql.AlertV2]) *MockAlertsV2Interface_ListAlerts_Call { _c.Call.Return(_a0) return _c } -func (_c *MockAlertsV2Interface_ListAlerts_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { +func (_c *MockAlertsV2Interface_ListAlerts_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.AlertV2]) *MockAlertsV2Interface_ListAlerts_Call { _c.Call.Return(run) return _c } // ListAlertsAll provides a mock function with given fields: ctx, request -func (_m *MockAlertsV2Interface) ListAlertsAll(ctx context.Context, request sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error) { +func (_m *MockAlertsV2Interface) ListAlertsAll(ctx context.Context, request sql.ListAlertsV2Request) ([]sql.AlertV2, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for ListAlertsAll") } - var r0 []sql.ListAlertsV2ResponseAlert + var r0 []sql.AlertV2 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) ([]sql.AlertV2, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) []sql.ListAlertsV2ResponseAlert); ok { + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) []sql.AlertV2); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]sql.ListAlertsV2ResponseAlert) + r0 
= ret.Get(0).([]sql.AlertV2) } } @@ -358,71 +417,12 @@ func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Run(run func(ctx context.Con return _c } -func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Return(_a0 []sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_ListAlertsAll_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MockAlertsV2Interface_ListAlertsAll_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_ListAlertsAll_Call { - _c.Call.Return(run) - return _c -} - -// ListAlertsV2ResponseAlertDisplayNameToIdMap provides a mock function with given fields: ctx, request -func (_m *MockAlertsV2Interface) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request sql.ListAlertsV2Request) (map[string]string, error) { - ret := _m.Called(ctx, request) - - if len(ret) == 0 { - panic("no return value specified for ListAlertsV2ResponseAlertDisplayNameToIdMap") - } - - var r0 map[string]string - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)); ok { - return rf(ctx, request) - } - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) map[string]string); ok { - r0 = rf(ctx, request) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { - r1 = rf(ctx, request) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAlertsV2ResponseAlertDisplayNameToIdMap' -type MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call struct { - *mock.Call -} - -// ListAlertsV2ResponseAlertDisplayNameToIdMap is a helper method to define mock.On call -// - ctx context.Context -// - request 
sql.ListAlertsV2Request -func (_e *MockAlertsV2Interface_Expecter) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { - return &MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call{Call: _e.mock.On("ListAlertsV2ResponseAlertDisplayNameToIdMap", ctx, request)} -} - -func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) - }) - return _c -} - -func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Return(_a0 map[string]string, _a1 error) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Return(_a0 []sql.AlertV2, _a1 error) *MockAlertsV2Interface_ListAlertsAll_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) ([]sql.AlertV2, error)) *MockAlertsV2Interface_ListAlertsAll_Call { _c.Call.Return(run) return _c } diff --git a/service/compute/model.go b/service/compute/model.go index 7125b7e2b..bf61fd0f1 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -5499,6 +5499,18 @@ const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEP const TerminationReasonCodeNetvisorSetupTimeout TerminationReasonCode = `NETVISOR_SETUP_TIMEOUT` +const 
TerminationReasonCodeNetworkCheckControlPlaneFailure TerminationReasonCode = `NETWORK_CHECK_CONTROL_PLANE_FAILURE` + +const TerminationReasonCodeNetworkCheckDnsServerFailure TerminationReasonCode = `NETWORK_CHECK_DNS_SERVER_FAILURE` + +const TerminationReasonCodeNetworkCheckMetadataEndpointFailure TerminationReasonCode = `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE` + +const TerminationReasonCodeNetworkCheckMultipleComponentsFailure TerminationReasonCode = `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE` + +const TerminationReasonCodeNetworkCheckNicFailure TerminationReasonCode = `NETWORK_CHECK_NIC_FAILURE` + +const TerminationReasonCodeNetworkCheckStorageFailure TerminationReasonCode = `NETWORK_CHECK_STORAGE_FAILURE` + const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE` const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` @@ -5523,6 +5535,8 @@ const TerminationReasonCodeResourceUsageBlocked TerminationReasonCode = `RESOURC const TerminationReasonCodeSecretCreationFailure TerminationReasonCode = `SECRET_CREATION_FAILURE` +const TerminationReasonCodeSecretPermissionDenied TerminationReasonCode = `SECRET_PERMISSION_DENIED` + const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` @@ -5595,11 +5609,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, 
`ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, 
`DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, 
`SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, 
`BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, 
`INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_PERMISSION_DENIED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, 
`WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", 
"CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", 
"NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", 
"AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", 
"GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CHECK_CONTROL_PLANE_FAILURE", "NETWORK_CHECK_DNS_SERVER_FAILURE", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE", "NETWORK_CHECK_NIC_FAILURE", "NETWORK_CHECK_STORAGE_FAILURE", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_PERMISSION_DENIED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", 
"SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 5e3771be4..3e34b55ac 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -249,10 +249,10 @@ func (a *GenieAPI) CreateMessage(ctx context.Context, genieCreateConversationMes return &WaitGetMessageGenieCompleted[GenieMessage]{ Response: genieMessage, ConversationId: genieCreateConversationMessageRequest.ConversationId, - MessageId: genieMessage.Id, + MessageId: genieMessage.MessageId, SpaceId: genieCreateConversationMessageRequest.SpaceId, Poll: func(timeout time.Duration, callback func(*GenieMessage)) (*GenieMessage, error) { - return a.WaitGetMessageGenieCompleted(ctx, genieCreateConversationMessageRequest.ConversationId, genieMessage.Id, genieCreateConversationMessageRequest.SpaceId, timeout, callback) + return a.WaitGetMessageGenieCompleted(ctx, genieCreateConversationMessageRequest.ConversationId, genieMessage.MessageId, genieCreateConversationMessageRequest.SpaceId, timeout, callback) }, timeout: 20 * time.Minute, callback: nil, diff --git a/service/files/model.go b/service/files/model.go index 1205e1281..3c406ade5 100755 --- a/service/files/model.go +++ b/service/files/model.go @@ -372,7 +372,8 @@ type UploadRequest struct { Contents io.ReadCloser `json:"-"` // The absolute path of the file. FilePath string `json:"-" url:"-"` - // If true, an existing file will be overwritten. 
+ // If true or unspecified, an existing file will be overwritten. If false, + // an error will be returned if the path points to an existing file. Overwrite bool `json:"-" url:"overwrite,omitempty"` ForceSendFields []string `json:"-" url:"-"` diff --git a/service/ml/model.go b/service/ml/model.go index b0e0c0491..80d5f6c48 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -410,6 +410,10 @@ type CreateForecastingExperimentRequest struct { // as a multiple of forecast_granularity. This value represents how far // ahead the model should forecast. ForecastHorizon int64 `json:"forecast_horizon"` + // The fully qualified path of a Unity Catalog table, formatted as + // catalog_name.schema_name.table_name, used to store future feature data + // for predictions. + FutureFeatureDataPath string `json:"future_feature_data_path,omitempty"` // The region code(s) to automatically add holiday features. Currently // supports only one region. HolidayRegions []string `json:"holiday_regions,omitempty"` @@ -444,7 +448,7 @@ type CreateForecastingExperimentRequest struct { // The column in the training table used to group the dataset for predicting // individual time series. TimeseriesIdentifierColumns []string `json:"timeseries_identifier_columns,omitempty"` - // The fully qualified name of a Unity Catalog table, formatted as + // The fully qualified path of a Unity Catalog table, formatted as // catalog_name.schema_name.table_name, used as training data for the // forecasting model. TrainDataPath string `json:"train_data_path"` diff --git a/service/pipelines/impl.go b/service/pipelines/impl.go index 588d752fd..53232ecd0 100755 --- a/service/pipelines/impl.go +++ b/service/pipelines/impl.go @@ -110,8 +110,7 @@ func (a *pipelinesImpl) ListPipelineEvents(ctx context.Context, request ListPipe // Retrieves events for a pipeline. 
func (a *pipelinesImpl) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) { iterator := a.ListPipelineEvents(ctx, request) - return listing.ToSliceN[PipelineEvent, int](ctx, iterator, request.MaxResults) - + return listing.ToSlice[PipelineEvent](ctx, iterator) } func (a *pipelinesImpl) internalListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { @@ -156,8 +155,7 @@ func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelines // Lists pipelines defined in the Delta Live Tables system. func (a *pipelinesImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { iterator := a.ListPipelines(ctx, request) - return listing.ToSliceN[PipelineStateInfo, int](ctx, iterator, request.MaxResults) - + return listing.ToSlice[PipelineStateInfo](ctx, iterator) } func (a *pipelinesImpl) internalListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { diff --git a/service/pipelines/model.go b/service/pipelines/model.go index b1c2795da..f7a6c092d 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -128,7 +128,7 @@ type DataPlaneId struct { // The instance name of the data plane emitting an event. Instance string `json:"instance,omitempty"` // A sequence number, unique and increasing within the data plane instance. - SeqNo int `json:"seq_no,omitempty"` + SeqNo int64 `json:"seq_no,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -266,7 +266,7 @@ type EditPipeline struct { // Whether Photon is enabled for this pipeline. Photon bool `json:"photon,omitempty"` // Unique identifier for this pipeline. - PipelineId string `json:"pipeline_id,omitempty" url:"-"` + PipelineId string `json:"-" url:"-"` // Restart window of this pipeline. 
RestartWindow *RestartWindow `json:"restart_window,omitempty"` // Write-only setting, available only in Create/Update calls. Specifies the @@ -374,7 +374,7 @@ func (s EventLogSpec) MarshalJSON() ([]byte, error) { } type FileLibrary struct { - // The absolute path of the file. + // The absolute path of the source code. Path string `json:"path,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -513,10 +513,10 @@ type IngestionGatewayPipelineDefinition struct { ConnectionId string `json:"connection_id,omitempty"` // Immutable. The Unity Catalog connection that this gateway pipeline uses // to communicate with the source. - ConnectionName string `json:"connection_name,omitempty"` + ConnectionName string `json:"connection_name"` // Required, Immutable. The name of the catalog for the gateway pipeline's // storage location. - GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"` + GatewayStorageCatalog string `json:"gateway_storage_catalog"` // Optional. The Unity Catalog-compatible name for the gateway storage // location. This is the destination to use for the data that is extracted // by the gateway. Delta Live Tables system will automatically create the @@ -524,7 +524,7 @@ type IngestionGatewayPipelineDefinition struct { GatewayStorageName string `json:"gateway_storage_name,omitempty"` // Required, Immutable. The name of the schema for the gateway pipelines's // storage location. - GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` + GatewayStorageSchema string `json:"gateway_storage_schema"` ForceSendFields []string `json:"-" url:"-"` } @@ -587,7 +587,7 @@ type ListPipelineEventsRequest struct { // with all fields in this request except max_results. An error is returned // if any fields other than max_results are set when this field is set. PageToken string `json:"-" url:"page_token,omitempty"` - + // The pipeline to return events for. 
PipelineId string `json:"-" url:"-"` ForceSendFields []string `json:"-" url:"-"` @@ -749,7 +749,7 @@ func (f *MaturityLevel) Type() string { } type NotebookLibrary struct { - // The absolute path of the notebook. + // The absolute path of the source code. Path string `json:"path,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -778,7 +778,7 @@ type Notifications struct { type Origin struct { // The id of a batch. Unique within a flow. - BatchId int `json:"batch_id,omitempty"` + BatchId int64 `json:"batch_id,omitempty"` // The cloud provider, e.g., AWS or Azure. Cloud string `json:"cloud,omitempty"` // The id of the cluster where an execution happens. Unique within a region. @@ -797,7 +797,7 @@ type Origin struct { // Materialization name. MaterializationName string `json:"materialization_name,omitempty"` // The org id of the user. Unique within a cloud. - OrgId int `json:"org_id,omitempty"` + OrgId int64 `json:"org_id,omitempty"` // The id of the pipeline. Globally unique. PipelineId string `json:"pipeline_id,omitempty"` // The name of the pipeline. Not unique. @@ -1025,7 +1025,7 @@ func (f *PipelineClusterAutoscaleMode) Type() string { type PipelineDeployment struct { // The deployment method that manages the pipeline. - Kind DeploymentKind `json:"kind,omitempty"` + Kind DeploymentKind `json:"kind"` // The path to the file containing metadata about the deployment. MetadataFilePath string `json:"metadata_file_path,omitempty"` @@ -1364,14 +1364,14 @@ type PipelineTrigger struct { type ReportSpec struct { // Required. Destination catalog to store table. - DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationCatalog string `json:"destination_catalog"` // Required. Destination schema to store table. - DestinationSchema string `json:"destination_schema,omitempty"` + DestinationSchema string `json:"destination_schema"` // Required. Destination table name. The pipeline fails if a table with that // name already exists. 
DestinationTable string `json:"destination_table,omitempty"` // Required. Report URL in the source system. - SourceUrl string `json:"source_url,omitempty"` + SourceUrl string `json:"source_url"` // Configuration settings to control the ingestion of tables. These settings // override the table_configuration defined in the // IngestionPipelineDefinition object. @@ -1440,16 +1440,16 @@ func (s RunAs) MarshalJSON() ([]byte, error) { type SchemaSpec struct { // Required. Destination catalog to store tables. - DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationCatalog string `json:"destination_catalog"` // Required. Destination schema to store tables in. Tables with the same // name as the source tables are created in this destination schema. The // pipeline fails If a table with the same name already exists. - DestinationSchema string `json:"destination_schema,omitempty"` + DestinationSchema string `json:"destination_schema"` // The source catalog name. Might be optional depending on the type of // source. SourceCatalog string `json:"source_catalog,omitempty"` // Required. Schema name in the source database. - SourceSchema string `json:"source_schema,omitempty"` + SourceSchema string `json:"source_schema"` // Configuration settings to control the ingestion of tables. These settings // are applied to all tables in this schema and override the // table_configuration defined in the IngestionPipelineDefinition object. @@ -1468,7 +1468,7 @@ func (s SchemaSpec) MarshalJSON() ([]byte, error) { type Sequencing struct { // A sequence number, unique and increasing within the control plane. - ControlPlaneSeqNo int `json:"control_plane_seq_no,omitempty"` + ControlPlaneSeqNo int64 `json:"control_plane_seq_no,omitempty"` // the ID assigned by the data plane. DataPlaneId *DataPlaneId `json:"data_plane_id,omitempty"` @@ -1524,6 +1524,7 @@ func (s StackFrame) MarshalJSON() ([]byte, error) { } type StartUpdate struct { + // What triggered this update. 
Cause StartUpdateCause `json:"cause,omitempty"` // If true, this update will reset all tables before running. FullRefresh bool `json:"full_refresh,omitempty"` @@ -1554,6 +1555,7 @@ func (s StartUpdate) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// What triggered this update. type StartUpdateCause string const StartUpdateCauseApiCall StartUpdateCause = `API_CALL` @@ -1613,9 +1615,9 @@ type StopRequest struct { type TableSpec struct { // Required. Destination catalog to store table. - DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationCatalog string `json:"destination_catalog"` // Required. Destination schema to store table. - DestinationSchema string `json:"destination_schema,omitempty"` + DestinationSchema string `json:"destination_schema"` // Optional. Destination table name. The pipeline fails if a table with that // name already exists. If not set, the source table name is used. DestinationTable string `json:"destination_table,omitempty"` @@ -1625,7 +1627,7 @@ type TableSpec struct { // type of source. SourceSchema string `json:"source_schema,omitempty"` // Required. Table name in the source database. - SourceTable string `json:"source_table,omitempty"` + SourceTable string `json:"source_table"` // Configuration settings to control the ingestion of tables. These settings // override the table_configuration defined in the // IngestionPipelineDefinition object and the SchemaSpec. @@ -1643,6 +1645,18 @@ func (s TableSpec) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } type TableSpecificConfig struct { + // A list of column names to be excluded for the ingestion. When not + // specified, include_columns fully controls what columns to be ingested. + // When specified, all other columns including future ones will be + // automatically included for ingestion. This field is mutually exclusive + // with `include_columns`. 
+ ExcludeColumns []string `json:"exclude_columns,omitempty"` + // A list of column names to be included for the ingestion. When not + // specified, all columns except ones in exclude_columns will be included. + // Future columns will be automatically included. When specified, all other + // future columns will be automatically excluded from ingestion. This field + // is mutually exclusive with `exclude_columns`. + IncludeColumns []string `json:"include_columns,omitempty"` // The primary key of the table used to apply changes. PrimaryKeys []string `json:"primary_keys,omitempty"` // If true, formula fields defined in the table are included in the @@ -1821,7 +1835,7 @@ func (f *UpdateInfoState) Type() string { type UpdateStateInfo struct { CreationTime string `json:"creation_time,omitempty"` - + // The update state. State UpdateStateInfoState `json:"state,omitempty"` UpdateId string `json:"update_id,omitempty"` @@ -1837,6 +1851,7 @@ func (s UpdateStateInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The update state. type UpdateStateInfoState string const UpdateStateInfoStateCanceled UpdateStateInfoState = `CANCELED` diff --git a/service/pkg.go b/service/pkg.go index 1d327c7dc..751c65297 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -54,10 +54,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. -// // - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. // +// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. 
// // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. diff --git a/service/sql/api.go b/service/sql/api.go index a41db187c..eff8a812e 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -401,32 +401,32 @@ type AlertsV2Interface interface { // Gets a list of alerts accessible to the user, ordered by creation time. // // This method is generated by Databricks SDK Code Generator. - ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] + ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[AlertV2] // List alerts. // // Gets a list of alerts accessible to the user, ordered by creation time. // // This method is generated by Databricks SDK Code Generator. - ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) + ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]AlertV2, error) - // ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. + // AlertV2DisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [AlertV2].DisplayName as key and [AlertV2].Id as value. // - // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // Returns an error if there's more than one [AlertV2] with the same .DisplayName. // - // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. + // Note: All [AlertV2] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
- ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) + AlertV2DisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) - // GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. + // GetByDisplayName calls [AlertsV2API.AlertV2DisplayNameToIdMap] and returns a single [AlertV2]. // - // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // Returns an error if there's more than one [AlertV2] with the same .DisplayName. // - // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. + // Note: All [AlertV2] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. - GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) + GetByDisplayName(ctx context.Context, name string) (*AlertV2, error) // Delete an alert. // @@ -470,14 +470,14 @@ func (a *AlertsV2API) GetAlertById(ctx context.Context, id string) (*AlertV2, er }) } -// ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. +// AlertV2DisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [AlertV2].DisplayName as key and [AlertV2].Id as value. // -// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. +// Returns an error if there's more than one [AlertV2] with the same .DisplayName. // -// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. +// Note: All [AlertV2] instances are loaded into memory before creating a map. 
// // This method is generated by Databricks SDK Code Generator. -func (a *AlertsV2API) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) { +func (a *AlertsV2API) AlertV2DisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAlertsAll(ctx, request) @@ -495,30 +495,30 @@ func (a *AlertsV2API) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Co return mapping, nil } -// GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. +// GetByDisplayName calls [AlertsV2API.AlertV2DisplayNameToIdMap] and returns a single [AlertV2]. // -// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. +// Returns an error if there's more than one [AlertV2] with the same .DisplayName. // -// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. +// Note: All [AlertV2] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
-func (a *AlertsV2API) GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) { +func (a *AlertsV2API) GetByDisplayName(ctx context.Context, name string) (*AlertV2, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAlertsAll(ctx, ListAlertsV2Request{}) if err != nil { return nil, err } - tmp := map[string][]ListAlertsV2ResponseAlert{} + tmp := map[string][]AlertV2{} for _, v := range result { key := v.DisplayName tmp[key] = append(tmp[key], v) } alternatives, ok := tmp[name] if !ok || len(alternatives) == 0 { - return nil, fmt.Errorf("ListAlertsV2ResponseAlert named '%s' does not exist", name) + return nil, fmt.Errorf("AlertV2 named '%s' does not exist", name) } if len(alternatives) > 1 { - return nil, fmt.Errorf("there are %d instances of ListAlertsV2ResponseAlert named '%s'", len(alternatives), name) + return nil, fmt.Errorf("there are %d instances of AlertV2 named '%s'", len(alternatives), name) } return &alternatives[0], nil } diff --git a/service/sql/impl.go b/service/sql/impl.go index c43cd5e31..c10f98d3d 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -194,13 +194,13 @@ func (a *alertsV2Impl) GetAlert(ctx context.Context, request GetAlertV2Request) // List alerts. // // Gets a list of alerts accessible to the user, ordered by creation time. 
-func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] { +func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[AlertV2] { getNextPage := func(ctx context.Context, req ListAlertsV2Request) (*ListAlertsV2Response, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalListAlerts(ctx, req) } - getItems := func(resp *ListAlertsV2Response) []ListAlertsV2ResponseAlert { + getItems := func(resp *ListAlertsV2Response) []AlertV2 { return resp.Results } getNextReq := func(resp *ListAlertsV2Response) *ListAlertsV2Request { @@ -221,9 +221,9 @@ func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Reque // List alerts. // // Gets a list of alerts accessible to the user, ordered by creation time. -func (a *alertsV2Impl) ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) { +func (a *alertsV2Impl) ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]AlertV2, error) { iterator := a.ListAlerts(ctx, request) - return listing.ToSlice[ListAlertsV2ResponseAlert](ctx, iterator) + return listing.ToSlice[AlertV2](ctx, iterator) } func (a *alertsV2Impl) internalListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) { diff --git a/service/sql/interface.go b/service/sql/interface.go index 5fcef16a6..631bccb98 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -130,7 +130,7 @@ type AlertsV2Service interface { // // Gets a list of alerts accessible to the user, ordered by creation time. // - // Use ListAlertsAll() to get all ListAlertsV2ResponseAlert instances, which will iterate over every result page. + // Use ListAlertsAll() to get all AlertV2 instances, which will iterate over every result page. 
ListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) // Delete an alert. diff --git a/service/sql/model.go b/service/sql/model.go index 9eca5ef2b..31f9f23fd 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -2811,7 +2811,7 @@ func (s ListAlertsV2Request) MarshalJSON() ([]byte, error) { type ListAlertsV2Response struct { NextPageToken string `json:"next_page_token,omitempty"` - Results []ListAlertsV2ResponseAlert `json:"results,omitempty"` + Results []AlertV2 `json:"results,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -2824,47 +2824,6 @@ func (s ListAlertsV2Response) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type ListAlertsV2ResponseAlert struct { - // The timestamp indicating when the alert was created. - CreateTime string `json:"create_time,omitempty"` - // Custom description for the alert. support mustache template. - CustomDescription string `json:"custom_description,omitempty"` - // Custom summary for the alert. support mustache template. - CustomSummary string `json:"custom_summary,omitempty"` - // The display name of the alert. - DisplayName string `json:"display_name,omitempty"` - - Evaluation *AlertV2Evaluation `json:"evaluation,omitempty"` - // UUID identifying the alert. - Id string `json:"id,omitempty"` - // Indicates whether the query is trashed. - LifecycleState LifecycleState `json:"lifecycle_state,omitempty"` - // The owner's username. This field is set to "Unavailable" if the user has - // been deleted. - OwnerUserName string `json:"owner_user_name,omitempty"` - // Text of the query to be run. - QueryText string `json:"query_text,omitempty"` - // The run as username. This field is set to "Unavailable" if the user has - // been deleted. - RunAsUserName string `json:"run_as_user_name,omitempty"` - - Schedule *CronSchedule `json:"schedule,omitempty"` - // The timestamp indicating when the alert was updated. 
- UpdateTime string `json:"update_time,omitempty"` - // ID of the SQL warehouse attached to the alert. - WarehouseId string `json:"warehouse_id,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *ListAlertsV2ResponseAlert) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s ListAlertsV2ResponseAlert) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - // Get dashboard objects type ListDashboardsRequest struct { // Name of dashboard attribute to order by. diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index d9727496d..80ca3e57c 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -678,7 +678,7 @@ func (s QueryVectorIndexResponse) MarshalJSON() ([]byte, error) { // Data returned in the query result. type ResultData struct { // Data rows returned in the query. - DataArray []ListValue `json:"data_array,omitempty"` + DataArray [][]string `json:"data_array,omitempty"` // Number of rows in the result set. RowCount int `json:"row_count,omitempty"`