diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 26ece1bc5..8cd956362 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -05692f4dcf168be190bb7bcda725ee8b368b7ae3 \ No newline at end of file +06a18b97d7996d6cd8dd88bfdb0f2c2792739e46 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index da04156a9..3296c8f9f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -110,7 +110,10 @@ experimental/mocks/service/settings/mock_default_namespace_interface.go linguist experimental/mocks/service/settings/mock_disable_legacy_access_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_features_interface.go linguist-generated=true +experimental/mocks/service/settings/mock_enable_export_notebook_interface.go linguist-generated=true experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go linguist-generated=true +experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go linguist-generated=true +experimental/mocks/service/settings/mock_enable_results_downloading_interface.go linguist-generated=true experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go linguist-generated=true experimental/mocks/service/settings/mock_esm_enablement_account_interface.go linguist-generated=true experimental/mocks/service/settings/mock_ip_access_lists_interface.go linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 8c772f608..d7c78f201 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -12,3 +12,20 @@ ### Internal Changes ### API Changes +* Added `UpdateEndpointBudgetPolicy` and `UpdateEndpointCustomTags` methods for [w.VectorSearchEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI) workspace-level service. 
+* Added `NodeTypeFlexibility` field for [compute.EditInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EditInstancePool). +* Added `PageSize` and `PageToken` fields for [compute.GetEvents](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEvents). +* Added `NextPageToken` and `PrevPageToken` fields for [compute.GetEventsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEventsResponse). +* Added `NodeTypeFlexibility` field for [compute.GetInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetInstancePool). +* Added `NodeTypeFlexibility` field for [compute.InstancePoolAndStats](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InstancePoolAndStats). +* Added `EffectivePerformanceTarget` field for [jobs.RepairHistoryItem](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairHistoryItem). +* Added `PerformanceTarget` field for [jobs.RepairRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairRun). +* Added `BudgetPolicyId` field for [vectorsearch.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#CreateEndpoint). +* Added `CustomTags` and `EffectiveBudgetPolicyId` fields for [vectorsearch.EndpointInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#EndpointInfo). +* Added `Disabled` enum value for [jobs.TerminationCodeCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationCodeCode). +* [Breaking] Changed `CreateIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service to return [vectorsearch.VectorIndex](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorIndex). 
+* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service. HTTP method/verb has changed. +* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service with new required argument order. +* [Breaking] Changed `DataArray` field for [vectorsearch.ResultData](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ResultData) to type [vectorsearch.ListValueList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValueList). +* [Breaking] Changed waiter for [VectorSearchEndpointsAPI.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI.CreateEndpoint). +* [Breaking] Removed `NullValue` field for [vectorsearch.Value](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#Value). 
diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go index 50e397591..9cc2b4d0f 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go @@ -481,6 +481,124 @@ func (_c *MockVectorSearchEndpointsInterface_ListEndpointsAll_Call) RunAndReturn return _c } +// UpdateEndpointBudgetPolicy provides a mock function with given fields: ctx, request +func (_m *MockVectorSearchEndpointsInterface) UpdateEndpointBudgetPolicy(ctx context.Context, request vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateEndpointBudgetPolicy") + } + + var r0 *vectorsearch.PatchEndpointBudgetPolicyResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) *vectorsearch.PatchEndpointBudgetPolicyResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.PatchEndpointBudgetPolicyResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEndpointBudgetPolicy' +type MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call struct { + *mock.Call +} + +// UpdateEndpointBudgetPolicy is a 
helper method to define mock.On call +// - ctx context.Context +// - request vectorsearch.PatchEndpointBudgetPolicyRequest +func (_e *MockVectorSearchEndpointsInterface_Expecter) UpdateEndpointBudgetPolicy(ctx interface{}, request interface{}) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + return &MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call{Call: _e.mock.On("UpdateEndpointBudgetPolicy", ctx, request)} +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call) Run(run func(ctx context.Context, request vectorsearch.PatchEndpointBudgetPolicyRequest)) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(vectorsearch.PatchEndpointBudgetPolicyRequest)) + }) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call) Return(_a0 *vectorsearch.PatchEndpointBudgetPolicyResponse, _a1 error) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call) RunAndReturn(run func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error)) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + _c.Call.Return(run) + return _c +} + +// UpdateEndpointCustomTags provides a mock function with given fields: ctx, request +func (_m *MockVectorSearchEndpointsInterface) UpdateEndpointCustomTags(ctx context.Context, request vectorsearch.UpdateEndpointCustomTagsRequest) (*vectorsearch.UpdateEndpointCustomTagsResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateEndpointCustomTags") + } + + var r0 *vectorsearch.UpdateEndpointCustomTagsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, 
vectorsearch.UpdateEndpointCustomTagsRequest) (*vectorsearch.UpdateEndpointCustomTagsResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.UpdateEndpointCustomTagsRequest) *vectorsearch.UpdateEndpointCustomTagsResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.UpdateEndpointCustomTagsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, vectorsearch.UpdateEndpointCustomTagsRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEndpointCustomTags' +type MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call struct { + *mock.Call +} + +// UpdateEndpointCustomTags is a helper method to define mock.On call +// - ctx context.Context +// - request vectorsearch.UpdateEndpointCustomTagsRequest +func (_e *MockVectorSearchEndpointsInterface_Expecter) UpdateEndpointCustomTags(ctx interface{}, request interface{}) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + return &MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call{Call: _e.mock.On("UpdateEndpointCustomTags", ctx, request)} +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call) Run(run func(ctx context.Context, request vectorsearch.UpdateEndpointCustomTagsRequest)) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(vectorsearch.UpdateEndpointCustomTagsRequest)) + }) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call) Return(_a0 *vectorsearch.UpdateEndpointCustomTagsResponse, _a1 error) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call) RunAndReturn(run func(context.Context, vectorsearch.UpdateEndpointCustomTagsRequest) (*vectorsearch.UpdateEndpointCustomTagsResponse, error)) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + _c.Call.Return(run) + return _c +} + // WaitGetEndpointVectorSearchEndpointOnline provides a mock function with given fields: ctx, endpointName, timeout, callback func (_m *MockVectorSearchEndpointsInterface) WaitGetEndpointVectorSearchEndpointOnline(ctx context.Context, endpointName string, timeout time.Duration, callback func(*vectorsearch.EndpointInfo)) (*vectorsearch.EndpointInfo, error) { ret := _m.Called(ctx, endpointName, timeout, callback) diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go index 8fd95e648..b396a28a5 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go @@ -25,23 +25,23 @@ func (_m *MockVectorSearchIndexesInterface) EXPECT() *MockVectorSearchIndexesInt } // CreateIndex provides a mock function with given fields: ctx, request -func (_m *MockVectorSearchIndexesInterface) CreateIndex(ctx context.Context, request vectorsearch.CreateVectorIndexRequest) (*vectorsearch.CreateVectorIndexResponse, error) { +func (_m *MockVectorSearchIndexesInterface) CreateIndex(ctx context.Context, request vectorsearch.CreateVectorIndexRequest) (*vectorsearch.VectorIndex, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for CreateIndex") } - var r0 *vectorsearch.CreateVectorIndexResponse + var r0 *vectorsearch.VectorIndex var r1 error - if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) 
(*vectorsearch.CreateVectorIndexResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) (*vectorsearch.VectorIndex, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) *vectorsearch.CreateVectorIndexResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) *vectorsearch.VectorIndex); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*vectorsearch.CreateVectorIndexResponse) + r0 = ret.Get(0).(*vectorsearch.VectorIndex) } } @@ -73,12 +73,12 @@ func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) Run(run func(ctx co return _c } -func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) Return(_a0 *vectorsearch.CreateVectorIndexResponse, _a1 error) *MockVectorSearchIndexesInterface_CreateIndex_Call { +func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) Return(_a0 *vectorsearch.VectorIndex, _a1 error) *MockVectorSearchIndexesInterface_CreateIndex_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) RunAndReturn(run func(context.Context, vectorsearch.CreateVectorIndexRequest) (*vectorsearch.CreateVectorIndexResponse, error)) *MockVectorSearchIndexesInterface_CreateIndex_Call { +func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) RunAndReturn(run func(context.Context, vectorsearch.CreateVectorIndexRequest) (*vectorsearch.VectorIndex, error)) *MockVectorSearchIndexesInterface_CreateIndex_Call { _c.Call.Return(run) return _c } @@ -142,6 +142,65 @@ func (_c *MockVectorSearchIndexesInterface_DeleteDataVectorIndex_Call) RunAndRet return _c } +// DeleteDataVectorIndexByIndexName provides a mock function with given fields: ctx, indexName +func (_m *MockVectorSearchIndexesInterface) DeleteDataVectorIndexByIndexName(ctx context.Context, indexName string) 
(*vectorsearch.DeleteDataVectorIndexResponse, error) { + ret := _m.Called(ctx, indexName) + + if len(ret) == 0 { + panic("no return value specified for DeleteDataVectorIndexByIndexName") + } + + var r0 *vectorsearch.DeleteDataVectorIndexResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*vectorsearch.DeleteDataVectorIndexResponse, error)); ok { + return rf(ctx, indexName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *vectorsearch.DeleteDataVectorIndexResponse); ok { + r0 = rf(ctx, indexName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.DeleteDataVectorIndexResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, indexName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteDataVectorIndexByIndexName' +type MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call struct { + *mock.Call +} + +// DeleteDataVectorIndexByIndexName is a helper method to define mock.On call +// - ctx context.Context +// - indexName string +func (_e *MockVectorSearchIndexesInterface_Expecter) DeleteDataVectorIndexByIndexName(ctx interface{}, indexName interface{}) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + return &MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call{Call: _e.mock.On("DeleteDataVectorIndexByIndexName", ctx, indexName)} +} + +func (_c *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call) Run(run func(ctx context.Context, indexName string)) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c 
*MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call) Return(_a0 *vectorsearch.DeleteDataVectorIndexResponse, _a1 error) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call) RunAndReturn(run func(context.Context, string) (*vectorsearch.DeleteDataVectorIndexResponse, error)) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + _c.Call.Return(run) + return _c +} + // DeleteIndex provides a mock function with given fields: ctx, request func (_m *MockVectorSearchIndexesInterface) DeleteIndex(ctx context.Context, request vectorsearch.DeleteIndexRequest) error { ret := _m.Called(ctx, request) diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index ac92cb087..7b7a441a0 100755 --- a/service/cleanrooms/model.go +++ b/service/cleanrooms/model.go @@ -191,7 +191,7 @@ func (s CleanRoomAssetForeignTableLocalDetails) MarshalJSON() ([]byte, error) { } type CleanRoomAssetNotebook struct { - // Server generated checksum that represents the notebook version. + // Server generated etag that represents the notebook version. Etag string `json:"etag,omitempty"` // Base 64 representation of the notebook contents. This is the same format // as returned by :method:workspace/export with the format of **HTML**. diff --git a/service/compute/model.go b/service/compute/model.go index c34ffb688..394488a1d 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2631,6 +2631,10 @@ type EditInstancePool struct { MaxCapacity int `json:"max_capacity,omitempty"` // Minimum number of idle instances to keep in the instance pool MinIdleInstances int `json:"min_idle_instances,omitempty"` + // For Fleet-pool V2, this object contains the information about the + // alternate node type ids to use when attempting to launch a cluster if the + // node type id is not available. 
+ NodeTypeFlexibility *NodeTypeFlexibility `json:"node_type_flexibility,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can // be provisioned and optimized for memory or compute intensive workloads. A @@ -3104,15 +3108,29 @@ type GetEvents struct { // An optional set of event types to filter on. If empty, all event types // are returned. EventTypes []EventType `json:"event_types,omitempty"` + // Deprecated: use page_token in combination with page_size instead. + // // The maximum number of events to include in a page of events. Defaults to // 50, and maximum allowed value is 500. Limit int64 `json:"limit,omitempty"` + // Deprecated: use page_token in combination with page_size instead. + // // The offset in the result set. Defaults to 0 (no offset). When an offset // is specified and the results are requested in descending order, the // end_time field is required. Offset int64 `json:"offset,omitempty"` // The order to list events in; either "ASC" or "DESC". Defaults to "DESC". Order GetEventsOrder `json:"order,omitempty"` + // The maximum number of events to include in a page of events. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is empty or 0, the server will decide the number + // of results to be returned. The field has to be in the range [0,500]. If + // the value is outside the range, the server enforces 0 or 500. + PageSize int `json:"page_size,omitempty"` + // Use next_page_token or prev_page_token returned from the previous request + // to list the next or previous page of events respectively. If page_token + // is empty, the first page is returned. + PageToken string `json:"page_token,omitempty"` // The start time in epoch milliseconds. If empty, returns events starting // from the beginning of time. 
StartTime int64 `json:"start_time,omitempty"` @@ -3157,9 +3175,21 @@ func (f *GetEventsOrder) Type() string { type GetEventsResponse struct { Events []ClusterEvent `json:"events,omitempty"` + // Deprecated: use next_page_token or prev_page_token instead. + // // The parameters required to retrieve the next page of events. Omitted if // there are no more events to read. NextPage *GetEvents `json:"next_page,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is "", it means no further results for the request. + NextPageToken string `json:"next_page_token,omitempty"` + // This field represents the pagination token to retrieve the previous page + // of results. If the value is "", it means no further results for the + // request. + PrevPageToken string `json:"prev_page_token,omitempty"` + // Deprecated: Returns 0 when request uses page_token. Will start returning + // zero when request uses offset/limit soon. + // // The total number of events filtered by the start_time, end_time, and // event_types. TotalCount int64 `json:"total_count,omitempty"` @@ -3236,6 +3266,10 @@ type GetInstancePool struct { MaxCapacity int `json:"max_capacity,omitempty"` // Minimum number of idle instances to keep in the instance pool MinIdleInstances int `json:"min_idle_instances,omitempty"` + // For Fleet-pool V2, this object contains the information about the + // alternate node type ids to use when attempting to launch a cluster if the + // node type id is not available. + NodeTypeFlexibility *NodeTypeFlexibility `json:"node_type_flexibility,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can // be provisioned and optimized for memory or compute intensive workloads. 
A @@ -3689,6 +3723,10 @@ type InstancePoolAndStats struct { MaxCapacity int `json:"max_capacity,omitempty"` // Minimum number of idle instances to keep in the instance pool MinIdleInstances int `json:"min_idle_instances,omitempty"` + // For Fleet-pool V2, this object contains the information about the + // alternate node type ids to use when attempting to launch a cluster if the + // node type id is not available. + NodeTypeFlexibility *NodeTypeFlexibility `json:"node_type_flexibility,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can // be provisioned and optimized for memory or compute intensive workloads. A @@ -4722,6 +4760,12 @@ func (s NodeType) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// For Fleet-V2 using classic clusters, this object contains the information +// about the alternate node type ids to use when attempting to launch a cluster. +// It can be used with both the driver and worker node types. +type NodeTypeFlexibility struct { +} + // Error message of a failed pending instances type PendingInstanceError struct { InstanceId string `json:"instance_id,omitempty"` diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 3e6b1ccbf..5e3771be4 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -48,10 +48,9 @@ type GenieInterface interface { // Generate full query result download. // - // Initiate full SQL query result download and obtain a `download_id` to track - // the download progress. This call initiates a new SQL execution to generate - // the query result. The result is stored in an external link can be retrieved - // using the [Get Download Full Query + // Initiates a new SQL execution and returns a `download_id` that you can use to + // track the progress of the download. 
The query result is stored in an external + // link and can be retrieved using the [Get Download Full Query // Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks // strongly recommends that you protect the URLs that are returned by the // `EXTERNAL_LINKS` disposition. See [Execute @@ -62,16 +61,13 @@ type GenieInterface interface { // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully - // receiving a `download_id`, use this API to Poll download progress and - // retrieve the SQL query result external link(s) upon completion. Warning: - // Databricks strongly recommends that you protect the URLs that are returned by - // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` - // disposition, a short-lived, presigned URL is generated, which can be used to - // download the results directly from Amazon S3. As a short-lived access - // credential is embedded in this presigned URL, you should protect the URL. - // Because presigned URLs are already generated with embedded temporary access - // credentials, you must not set an Authorization header in the download - // requests. See [Execute + // receiving a `download_id`, use this API to poll the download progress. When + // the download is complete, the API returns one or more external links to the + // query result files. Warning: Databricks strongly recommends that you protect + // the URLs that are returned by the `EXTERNAL_LINKS` disposition. You must not + // set an Authorization header in download requests. When using the + // `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant + // temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. 
GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) @@ -79,16 +75,13 @@ type GenieInterface interface { // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully - // receiving a `download_id`, use this API to Poll download progress and - // retrieve the SQL query result external link(s) upon completion. Warning: - // Databricks strongly recommends that you protect the URLs that are returned by - // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` - // disposition, a short-lived, presigned URL is generated, which can be used to - // download the results directly from Amazon S3. As a short-lived access - // credential is embedded in this presigned URL, you should protect the URL. - // Because presigned URLs are already generated with embedded temporary access - // credentials, you must not set an Authorization header in the download - // requests. See [Execute + // receiving a `download_id`, use this API to poll the download progress. When + // the download is complete, the API returns one or more external links to the + // query result files. Warning: Databricks strongly recommends that you protect + // the URLs that are returned by the `EXTERNAL_LINKS` disposition. You must not + // set an Authorization header in download requests. When using the + // `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant + // temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. 
GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) @@ -297,16 +290,13 @@ func (a *GenieAPI) CreateMessageAndWait(ctx context.Context, genieCreateConversa // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully -// receiving a `download_id`, use this API to Poll download progress and -// retrieve the SQL query result external link(s) upon completion. Warning: -// Databricks strongly recommends that you protect the URLs that are returned by -// the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` -// disposition, a short-lived, presigned URL is generated, which can be used to -// download the results directly from Amazon S3. As a short-lived access -// credential is embedded in this presigned URL, you should protect the URL. -// Because presigned URLs are already generated with embedded temporary access -// credentials, you must not set an Authorization header in the download -// requests. See [Execute +// receiving a `download_id`, use this API to poll the download progress. When +// the download is complete, the API returns one or more external links to the +// query result files. Warning: Databricks strongly recommends that you protect +// the URLs that are returned by the `EXTERNAL_LINKS` disposition. You must not +// set an Authorization header in download requests. When using the +// `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant +// temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. 
func (a *GenieAPI) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) { return a.genieImpl.GetDownloadFullQueryResult(ctx, GenieGetDownloadFullQueryResultRequest{ diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index 0df971d5f..ffc8bdc7a 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -33,10 +33,9 @@ type GenieService interface { // Generate full query result download. // - // Initiate full SQL query result download and obtain a `download_id` to - // track the download progress. This call initiates a new SQL execution to - // generate the query result. The result is stored in an external link can - // be retrieved using the [Get Download Full Query + // Initiates a new SQL execution and returns a `download_id` that you can + // use to track the progress of the download. The query result is stored in + // an external link and can be retrieved using the [Get Download Full Query // Result](:method:genie/getdownloadfullqueryresult) API. Warning: // Databricks strongly recommends that you protect the URLs that are // returned by the `EXTERNAL_LINKS` disposition. See [Execute @@ -47,16 +46,13 @@ type GenieService interface { // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully - // receiving a `download_id`, use this API to Poll download progress and - // retrieve the SQL query result external link(s) upon completion. Warning: - // Databricks strongly recommends that you protect the URLs that are - // returned by the `EXTERNAL_LINKS` disposition. When you use the - // `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, - // which can be used to download the results directly from Amazon S3. 
As a - // short-lived access credential is embedded in this presigned URL, you - // should protect the URL. Because presigned URLs are already generated with - // embedded temporary access credentials, you must not set an Authorization - // header in the download requests. See [Execute + // receiving a `download_id`, use this API to poll the download progress. + // When the download is complete, the API returns one or more external links + // to the query result files. Warning: Databricks strongly recommends that + // you protect the URLs that are returned by the `EXTERNAL_LINKS` + // disposition. You must not set an Authorization header in download + // requests. When using the `EXTERNAL_LINKS` disposition, Databricks returns + // presigned URLs that grant temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) diff --git a/service/dashboards/model.go b/service/dashboards/model.go index b9b9b168a..a3f936531 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -354,7 +354,7 @@ type GenieGenerateDownloadFullQueryResultRequest struct { ConversationId string `json:"-" url:"-"` // Message ID MessageId string `json:"-" url:"-"` - // Space ID + // Genie space ID SpaceId string `json:"-" url:"-"` } @@ -397,7 +397,7 @@ type GenieGetDownloadFullQueryResultRequest struct { DownloadId string `json:"-" url:"-"` // Message ID MessageId string `json:"-" url:"-"` - // Space ID + // Genie space ID SpaceId string `json:"-" url:"-"` } @@ -557,7 +557,7 @@ func (s GenieResultMetadata) MarshalJSON() ([]byte, error) { type GenieSpace struct { // Description of the Genie Space Description string `json:"description,omitempty"` - // Space ID + // Genie space ID SpaceId string `json:"space_id"` // Title of the Genie Space Title string `json:"title"` diff --git 
a/service/jobs/model.go b/service/jobs/model.go index eb7628933..e3e23e7a8 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -361,10 +361,12 @@ func (f *CleanRoomTaskRunResultState) Type() string { // Stores the run state of the clean rooms notebook task. type CleanRoomTaskRunState struct { // A value indicating the run's current lifecycle state. This field is - // always available in the response. + // always available in the response. Note: Additional states might be + // introduced in future releases. LifeCycleState CleanRoomTaskRunLifeCycleState `json:"life_cycle_state,omitempty"` // A value indicating the run's result. This field is only available for - // terminal lifecycle states. + // terminal lifecycle states. Note: Additional states might be introduced in + // future releases. ResultState CleanRoomTaskRunResultState `json:"result_state,omitempty"` } @@ -753,10 +755,13 @@ func (s DashboardPageSnapshot) MarshalJSON() ([]byte, error) { // Configures the Lakeview Dashboard job task type. type DashboardTask struct { + // The identifier of the dashboard to refresh. DashboardId string `json:"dashboard_id,omitempty"` - + // Optional: subscription configuration for sending the dashboard snapshot. Subscription *Subscription `json:"subscription,omitempty"` - // The warehouse id to execute the dashboard with for the schedule + // Optional: The warehouse id to execute the dashboard with for the + // schedule. If not specified, the default warehouse of the dashboard will + // be used. WarehouseId string `json:"warehouse_id,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -2578,6 +2583,15 @@ type QueueSettings struct { } type RepairHistoryItem struct { + // The actual performance target used by the serverless run during + // execution. This can differ from the client-set performance target on the + // request depending on whether the performance mode is supported by the job + // type. 
+ // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. + EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The end time of the (repaired) run. EndTime int64 `json:"end_time,omitempty"` // The ID of the repair. Only returned for the items that represent a repair @@ -2681,6 +2695,15 @@ type RepairRun struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]string `json:"notebook_params,omitempty"` + // The performance mode on a serverless job. The performance target + // determines the level of compute performance or cost-efficiency for the + // run. This field overrides the performance target defined on the job + // level. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. + PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` @@ -3691,12 +3714,14 @@ func (f *RunResultState) Type() string { // The current state of the run. type RunState struct { // A value indicating the run's current lifecycle state. This field is - // always available in the response. + // always available in the response. Note: Additional states might be + // introduced in future releases. LifeCycleState RunLifeCycleState `json:"life_cycle_state,omitempty"` // The reason indicating why the run was queued. QueueReason string `json:"queue_reason,omitempty"` // A value indicating the run's result. 
This field is only available for - // terminal lifecycle states. + // terminal lifecycle states. Note: Additional states might be introduced in + // future releases. ResultState RunResultState `json:"result_state,omitempty"` // A descriptive message for the current state. This field is unstructured, // and its exact format is subject to change. @@ -3757,7 +3782,7 @@ type RunTask struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *RunConditionTask `json:"condition_task,omitempty"` - // The task runs a DashboardTask when the `dashboard_task` field is present. + // The task refreshes a dashboard and sends a snapshot to subscribers. DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use @@ -4504,7 +4529,7 @@ type SubmitTask struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` - // The task runs a DashboardTask when the `dashboard_task` field is present. + // The task refreshes a dashboard and sends a snapshot to subscribers. DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use @@ -4621,7 +4646,7 @@ type Subscription struct { CustomSubject string `json:"custom_subject,omitempty"` // When true, the subscription will not send emails. Paused bool `json:"paused,omitempty"` - + // The list of subscribers to send the snapshot of the dashboard to. 
Subscribers []SubscriptionSubscriber `json:"subscribers,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -4636,8 +4661,11 @@ func (s Subscription) MarshalJSON() ([]byte, error) { } type SubscriptionSubscriber struct { + // A snapshot of the dashboard will be sent to the destination when the + // `destination_id` field is present. DestinationId string `json:"destination_id,omitempty"` - + // A snapshot of the dashboard will be sent to the user's email when the + // `user_name` field is present. UserName string `json:"user_name,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -4689,7 +4717,7 @@ type Task struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` - // The task runs a DashboardTask when the `dashboard_task` field is present. + // The task refreshes a dashboard and sends a snapshot to subscribers. DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use @@ -4951,7 +4979,8 @@ func (s TaskNotificationSettings) MarshalJSON() ([]byte, error) { // details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. // Refer to the state message for further details. * // `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job -// level queue size limit. +// level queue size limit. * `DISABLED`: The run was never executed because it +// was disabled explicitly by the user. // // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now type TerminationCodeCode string @@ -4975,6 +5004,9 @@ const TerminationCodeCodeClusterError TerminationCodeCode = `CLUSTER_ERROR` // frame. 
const TerminationCodeCodeClusterRequestLimitExceeded TerminationCodeCode = `CLUSTER_REQUEST_LIMIT_EXCEEDED` +// The run was never executed because it was disabled explicitly by the user. +const TerminationCodeCodeDisabled TerminationCodeCode = `DISABLED` + // The run encountered an error while communicating with the Spark Driver. const TerminationCodeCodeDriverError TerminationCodeCode = `DRIVER_ERROR` @@ -5055,11 +5087,11 @@ func (f *TerminationCodeCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationCodeCode) Set(v string) error { switch v { - case `BUDGET_POLICY_LIMIT_EXCEEDED`, `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `USER_CANCELED`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: + case `BUDGET_POLICY_LIMIT_EXCEEDED`, `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DISABLED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `USER_CANCELED`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: *f = TerminationCodeCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BUDGET_POLICY_LIMIT_EXCEEDED", "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", "CLUSTER_REQUEST_LIMIT_EXCEEDED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", 
"INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "USER_CANCELED", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) + return fmt.Errorf(`value "%s" is not one of "BUDGET_POLICY_LIMIT_EXCEEDED", "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", "CLUSTER_REQUEST_LIMIT_EXCEEDED", "DISABLED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", "INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "USER_CANCELED", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) } } @@ -5111,7 +5143,9 @@ type TerminationDetails struct { // configuration. Refer to the state message for further details. * // `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to // the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: - // The run was skipped due to reaching the job level queue size limit. + // The run was skipped due to reaching the job level queue size limit. * + // `DISABLED`: The run was never executed because it was disabled explicitly + // by the user. // // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now Code TerminationCodeCode `json:"code,omitempty"` diff --git a/service/pkg.go b/service/pkg.go index 95dbd2fba..aaefe6bdf 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -52,10 +52,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. 
-// // - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. // +// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. // // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. @@ -82,7 +82,7 @@ // // - [settings.DisableLegacyFeaturesAPI]: Disable legacy features for new Databricks workspaces. // -// - [settings.EnableExportNotebookAPI]: Controls whether users can export notebooks and files from the Workspace. +// - [settings.EnableExportNotebookAPI]: Controls whether users can export notebooks and files from the Workspace UI. // // - [settings.EnableIpAccessListsAPI]: Controls the enforcement of IP access lists for accessing the account console. // diff --git a/service/settings/api.go b/service/settings/api.go index efbd46945..cb9a7f1ff 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -706,14 +706,14 @@ type DisableLegacyFeaturesAPI struct { type EnableExportNotebookInterface interface { - // Get the Enable Export Notebook setting. + // Get the Notebook and File exporting setting. // - // Gets the Enable Export Notebook setting. + // Gets the Notebook and File exporting setting. GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) - // Update the Enable Export Notebook setting. + // Update the Notebook and File exporting setting. // - // Updates the Enable Export Notebook setting. The model follows eventual + // Updates the Notebook and File exporting setting. The model follows eventual // consistency, which means the get after the update operation might receive // stale values for some time. 
PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) @@ -727,8 +727,8 @@ func NewEnableExportNotebook(client *client.DatabricksClient) *EnableExportNoteb } } -// Controls whether users can export notebooks and files from the Workspace. By -// default, this setting is enabled. +// Controls whether users can export notebooks and files from the Workspace UI. +// By default, this setting is enabled. type EnableExportNotebookAPI struct { enableExportNotebookImpl } @@ -768,14 +768,14 @@ type EnableIpAccessListsAPI struct { type EnableNotebookTableClipboardInterface interface { - // Get the Enable Notebook Table Clipboard setting. + // Get the Results Table Clipboard features setting. // - // Gets the Enable Notebook Table Clipboard setting. + // Gets the Results Table Clipboard features setting. GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) - // Update the Enable Notebook Table Clipboard setting. + // Update the Results Table Clipboard features setting. // - // Updates the Enable Notebook Table Clipboard setting. The model follows + // Updates the Results Table Clipboard features setting. The model follows // eventual consistency, which means the get after the update operation might // receive stale values for some time. PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) @@ -797,14 +797,14 @@ type EnableNotebookTableClipboardAPI struct { type EnableResultsDownloadingInterface interface { - // Get the Enable Results Downloading setting. + // Get the Notebook results download setting. // - // Gets the Enable Results Downloading setting. + // Gets the Notebook results download setting. GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) - // Update the Enable Results Downloading setting. + // Update the Notebook results download setting. 
// - // Updates the Enable Results Downloading setting. The model follows eventual + // Updates the Notebook results download setting. The model follows eventual // consistency, which means the get after the update operation might receive // stale values for some time. PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) @@ -1500,8 +1500,8 @@ type SettingsInterface interface { // all DBFS functionality is enabled DisableLegacyDbfs() DisableLegacyDbfsInterface - // Controls whether users can export notebooks and files from the Workspace. - // By default, this setting is enabled. + // Controls whether users can export notebooks and files from the Workspace + // UI. By default, this setting is enabled. EnableExportNotebook() EnableExportNotebookInterface // Controls whether users can copy tabular data to the clipboard via the UI. @@ -1623,8 +1623,8 @@ type SettingsAPI struct { // all DBFS functionality is enabled disableLegacyDbfs DisableLegacyDbfsInterface - // Controls whether users can export notebooks and files from the Workspace. - // By default, this setting is enabled. + // Controls whether users can export notebooks and files from the Workspace + // UI. By default, this setting is enabled. enableExportNotebook EnableExportNotebookInterface // Controls whether users can copy tabular data to the clipboard via the UI. diff --git a/service/settings/interface.go b/service/settings/interface.go index f5faae367..8042e91ae 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -343,20 +343,20 @@ type DisableLegacyFeaturesService interface { Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) } -// Controls whether users can export notebooks and files from the Workspace. By -// default, this setting is enabled. +// Controls whether users can export notebooks and files from the Workspace UI. 
+// By default, this setting is enabled. type EnableExportNotebookService interface { - // Get the Enable Export Notebook setting. + // Get the Notebook and File exporting setting. // - // Gets the Enable Export Notebook setting. + // Gets the Notebook and File exporting setting. GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) - // Update the Enable Export Notebook setting. + // Update the Notebook and File exporting setting. // - // Updates the Enable Export Notebook setting. The model follows eventual - // consistency, which means the get after the update operation might receive - // stale values for some time. + // Updates the Notebook and File exporting setting. The model follows + // eventual consistency, which means the get after the update operation + // might receive stale values for some time. PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) } @@ -385,14 +385,14 @@ type EnableIpAccessListsService interface { // default, this setting is enabled. type EnableNotebookTableClipboardService interface { - // Get the Enable Notebook Table Clipboard setting. + // Get the Results Table Clipboard features setting. // - // Gets the Enable Notebook Table Clipboard setting. + // Gets the Results Table Clipboard features setting. GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) - // Update the Enable Notebook Table Clipboard setting. + // Update the Results Table Clipboard features setting. // - // Updates the Enable Notebook Table Clipboard setting. The model follows + // Updates the Results Table Clipboard features setting. The model follows // eventual consistency, which means the get after the update operation // might receive stale values for some time. 
PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) @@ -402,16 +402,16 @@ type EnableNotebookTableClipboardService interface { // setting is enabled. type EnableResultsDownloadingService interface { - // Get the Enable Results Downloading setting. + // Get the Notebook results download setting. // - // Gets the Enable Results Downloading setting. + // Gets the Notebook results download setting. GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) - // Update the Enable Results Downloading setting. + // Update the Notebook results download setting. // - // Updates the Enable Results Downloading setting. The model follows - // eventual consistency, which means the get after the update operation - // might receive stale values for some time. + // Updates the Notebook results download setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. 
PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) } diff --git a/service/settings/model.go b/service/settings/model.go index 39daaf756..8775aaa34 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -396,6 +396,8 @@ const ComplianceStandardIsmap ComplianceStandard = `ISMAP` const ComplianceStandardItarEar ComplianceStandard = `ITAR_EAR` +const ComplianceStandardKFsi ComplianceStandard = `K_FSI` + const ComplianceStandardNone ComplianceStandard = `NONE` const ComplianceStandardPciDss ComplianceStandard = `PCI_DSS` @@ -408,11 +410,11 @@ func (f *ComplianceStandard) String() string { // Set raw string value and validate it against allowed values func (f *ComplianceStandard) Set(v string) error { switch v { - case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `HITRUST`, `IRAP_PROTECTED`, `ISMAP`, `ITAR_EAR`, `NONE`, `PCI_DSS`: + case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `HITRUST`, `IRAP_PROTECTED`, `ISMAP`, `ITAR_EAR`, `K_FSI`, `NONE`, `PCI_DSS`: *f = ComplianceStandard(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "HITRUST", "IRAP_PROTECTED", "ISMAP", "ITAR_EAR", "NONE", "PCI_DSS"`, v) + return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "HITRUST", "IRAP_PROTECTED", "ISMAP", "ITAR_EAR", "K_FSI", "NONE", "PCI_DSS"`, v) } } diff --git a/service/vectorsearch/api.go b/service/vectorsearch/api.go index c0e3af2fb..ee6304132 100755 --- a/service/vectorsearch/api.go +++ b/service/vectorsearch/api.go @@ -34,26 +34,46 @@ type VectorSearchEndpointsInterface interface { CreateEndpointAndWait(ctx context.Context, createEndpoint 
CreateEndpoint, options ...retries.Option[EndpointInfo]) (*EndpointInfo, error) // Delete an endpoint. + // + // Delete a vector search endpoint. DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error // Delete an endpoint. + // + // Delete a vector search endpoint. DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error // Get an endpoint. + // + // Get details for a single vector search endpoint. GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) // Get an endpoint. + // + // Get details for a single vector search endpoint. GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) // List all endpoints. // + // List all vector search endpoints in the workspace. + // // This method is generated by Databricks SDK Code Generator. ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] // List all endpoints. // + // List all vector search endpoints in the workspace. + // // This method is generated by Databricks SDK Code Generator. ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) + + // Update the budget policy of an endpoint. + // + // Update the budget policy of an endpoint + UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) + + // Update the custom tags of an endpoint. + UpdateEndpointCustomTags(ctx context.Context, request UpdateEndpointCustomTagsRequest) (*UpdateEndpointCustomTagsResponse, error) } func NewVectorSearchEndpoints(client *client.DatabricksClient) *VectorSearchEndpointsAPI { @@ -173,6 +193,8 @@ func (a *VectorSearchEndpointsAPI) CreateEndpointAndWait(ctx context.Context, cr } // Delete an endpoint. +// +// Delete a vector search endpoint. 
func (a *VectorSearchEndpointsAPI) DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error { return a.vectorSearchEndpointsImpl.DeleteEndpoint(ctx, DeleteEndpointRequest{ EndpointName: endpointName, @@ -180,6 +202,8 @@ func (a *VectorSearchEndpointsAPI) DeleteEndpointByEndpointName(ctx context.Cont } // Get an endpoint. +// +// Get details for a single vector search endpoint. func (a *VectorSearchEndpointsAPI) GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) { return a.vectorSearchEndpointsImpl.GetEndpoint(ctx, GetEndpointRequest{ EndpointName: endpointName, @@ -191,13 +215,18 @@ type VectorSearchIndexesInterface interface { // Create an index. // // Create a new index. - CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) + CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*VectorIndex, error) // Delete data from index. // // Handles the deletion of data from a specified vector index. DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) + // Delete data from index. + // + // Handles the deletion of data from a specified vector index. + DeleteDataVectorIndexByIndexName(ctx context.Context, indexName string) (*DeleteDataVectorIndexResponse, error) + // Delete an index. // // Delete an index. @@ -272,16 +301,25 @@ func NewVectorSearchIndexes(client *client.DatabricksClient) *VectorSearchIndexe // supports real-time and efficient approximate nearest neighbor (ANN) search // queries. // -// There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index +// There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index // that automatically syncs with a source Delta Table, automatically and // incrementally updating the index as the underlying data in the Delta Table -// changes. 
* **Direct Vector Access Index**: An index that supports direct read +// changes. - **Direct Vector Access Index**: An index that supports direct read // and write of vectors and metadata through our REST and SDK APIs. With this // model, the user manages index updates. type VectorSearchIndexesAPI struct { vectorSearchIndexesImpl } +// Delete data from index. +// +// Handles the deletion of data from a specified vector index. +func (a *VectorSearchIndexesAPI) DeleteDataVectorIndexByIndexName(ctx context.Context, indexName string) (*DeleteDataVectorIndexResponse, error) { + return a.vectorSearchIndexesImpl.DeleteDataVectorIndex(ctx, DeleteDataVectorIndexRequest{ + IndexName: indexName, + }) +} + // Delete an index. // // Delete an index. diff --git a/service/vectorsearch/impl.go b/service/vectorsearch/impl.go index 1deb7eea3..a11abb569 100755 --- a/service/vectorsearch/impl.go +++ b/service/vectorsearch/impl.go @@ -33,6 +33,7 @@ func (a *vectorSearchEndpointsImpl) DeleteEndpoint(ctx context.Context, request path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v", request.EndpointName) queryParams := make(map[string]any) headers := make(map[string]string) + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteEndpointResponse) return err } @@ -48,6 +49,8 @@ func (a *vectorSearchEndpointsImpl) GetEndpoint(ctx context.Context, request Get } // List all endpoints. +// +// List all vector search endpoints in the workspace. func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] { getNextPage := func(ctx context.Context, req ListEndpointsRequest) (*ListEndpointResponse, error) { @@ -73,6 +76,8 @@ func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request L } // List all endpoints. +// +// List all vector search endpoints in the workspace. 
func (a *vectorSearchEndpointsImpl) ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) { iterator := a.ListEndpoints(ctx, request) return listing.ToSlice[EndpointInfo](ctx, iterator) @@ -88,20 +93,42 @@ func (a *vectorSearchEndpointsImpl) internalListEndpoints(ctx context.Context, r return &listEndpointResponse, err } +func (a *vectorSearchEndpointsImpl) UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) { + var patchEndpointBudgetPolicyResponse PatchEndpointBudgetPolicyResponse + path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/budget-policy", request.EndpointName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchEndpointBudgetPolicyResponse) + return &patchEndpointBudgetPolicyResponse, err +} + +func (a *vectorSearchEndpointsImpl) UpdateEndpointCustomTags(ctx context.Context, request UpdateEndpointCustomTagsRequest) (*UpdateEndpointCustomTagsResponse, error) { + var updateEndpointCustomTagsResponse UpdateEndpointCustomTagsResponse + path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/tags", request.EndpointName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateEndpointCustomTagsResponse) + return &updateEndpointCustomTagsResponse, err +} + // unexported type that holds implementations of just VectorSearchIndexes API methods type vectorSearchIndexesImpl struct { client *client.DatabricksClient } -func (a *vectorSearchIndexesImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) { - 
var createVectorIndexResponse CreateVectorIndexResponse +func (a *vectorSearchIndexesImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*VectorIndex, error) { + var vectorIndex VectorIndex path := "/api/2.0/vector-search/indexes" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createVectorIndexResponse) - return &createVectorIndexResponse, err + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &vectorIndex) + return &vectorIndex, err } func (a *vectorSearchIndexesImpl) DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) { @@ -110,8 +137,7 @@ func (a *vectorSearchIndexesImpl) DeleteDataVectorIndex(ctx context.Context, req queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteDataVectorIndexResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDataVectorIndexResponse) return &deleteDataVectorIndexResponse, err } @@ -120,6 +146,7 @@ func (a *vectorSearchIndexesImpl) DeleteIndex(ctx context.Context, request Delet path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v", request.IndexName) queryParams := make(map[string]any) headers := make(map[string]string) + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteIndexResponse) return err } @@ -217,6 +244,7 @@ func (a *vectorSearchIndexesImpl) SyncIndex(ctx context.Context, request SyncInd path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/sync", request.IndexName) queryParams := make(map[string]any) headers := 
make(map[string]string) + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &syncIndexResponse) return err } diff --git a/service/vectorsearch/interface.go b/service/vectorsearch/interface.go index 326f552c8..e165c8042 100755 --- a/service/vectorsearch/interface.go +++ b/service/vectorsearch/interface.go @@ -15,25 +15,39 @@ type VectorSearchEndpointsService interface { CreateEndpoint(ctx context.Context, request CreateEndpoint) (*EndpointInfo, error) // Delete an endpoint. + // + // Delete a vector search endpoint. DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error // Get an endpoint. + // + // Get details for a single vector search endpoint. GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) // List all endpoints. // + // List all vector search endpoints in the workspace. + // // Use ListEndpointsAll() to get all EndpointInfo instances, which will iterate over every result page. ListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) + + // Update the budget policy of an endpoint. + // + // Update the budget policy of an endpoint + UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) + + // Update the custom tags of an endpoint. + UpdateEndpointCustomTags(ctx context.Context, request UpdateEndpointCustomTagsRequest) (*UpdateEndpointCustomTagsResponse, error) } // **Index**: An efficient representation of your embedding vectors that // supports real-time and efficient approximate nearest neighbor (ANN) search // queries. 
// -// There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index +// There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index // that automatically syncs with a source Delta Table, automatically and // incrementally updating the index as the underlying data in the Delta Table -// changes. * **Direct Vector Access Index**: An index that supports direct read +// changes. - **Direct Vector Access Index**: An index that supports direct read // and write of vectors and metadata through our REST and SDK APIs. With this // model, the user manages index updates. type VectorSearchIndexesService interface { @@ -41,7 +55,7 @@ type VectorSearchIndexesService interface { // Create an index. // // Create a new index. - CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) + CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*VectorIndex, error) // Delete data from index. // diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index 4e5abe555..d9727496d 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -24,10 +24,22 @@ func (s ColumnInfo) MarshalJSON() ([]byte, error) { } type CreateEndpoint struct { - // Type of endpoint. 
+ // The budget policy id to be applied + BudgetPolicyId string `json:"budget_policy_id,omitempty"` + // Type of endpoint EndpointType EndpointType `json:"endpoint_type"` - // Name of endpoint + // Name of the vector search endpoint Name string `json:"name"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CreateEndpoint) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateEndpoint) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type CreateVectorIndexRequest struct { @@ -39,13 +51,12 @@ type CreateVectorIndexRequest struct { DirectAccessIndexSpec *DirectAccessVectorIndexSpec `json:"direct_access_index_spec,omitempty"` // Name of the endpoint to be used for serving the index EndpointName string `json:"endpoint_name"` - // There are 2 types of Vector Search indexes: - // - // - `DELTA_SYNC`: An index that automatically syncs with a source Delta - // Table, automatically and incrementally updating the index as the - // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index - // that supports direct read and write of vectors and metadata through our - // REST and SDK APIs. With this model, the user manages index updates. + // There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that + // automatically syncs with a source Delta Table, automatically and + // incrementally updating the index as the underlying data in the Delta + // Table changes. - `DIRECT_ACCESS`: An index that supports direct read and + // write of vectors and metadata through our REST and SDK APIs. With this + // model, the user manages index updates. 
IndexType VectorIndexType `json:"index_type"` // Name of the index Name string `json:"name"` @@ -53,11 +64,23 @@ type CreateVectorIndexRequest struct { PrimaryKey string `json:"primary_key"` } -type CreateVectorIndexResponse struct { - VectorIndex *VectorIndex `json:"vector_index,omitempty"` +type CustomTag struct { + // Key field for a vector search endpoint tag. + Key string `json:"key"` + // [Optional] Value field for a vector search endpoint tag. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CustomTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CustomTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } -// Result of the upsert or delete operation. type DeleteDataResult struct { // List of primary keys for rows that failed to process. FailedPrimaryKeys []string `json:"failed_primary_keys,omitempty"` @@ -75,7 +98,6 @@ func (s DeleteDataResult) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Status of the delete operation. type DeleteDataStatus string const DeleteDataStatusFailure DeleteDataStatus = `FAILURE` @@ -105,16 +127,15 @@ func (f *DeleteDataStatus) Type() string { return "DeleteDataStatus" } -// Request payload for deleting data from a vector index. +// Delete data from index type DeleteDataVectorIndexRequest struct { // Name of the vector index where data is to be deleted. Must be a Direct // Vector Access Index. IndexName string `json:"-" url:"-"` // List of primary keys for the data to be deleted. - PrimaryKeys []string `json:"primary_keys"` + PrimaryKeys []string `json:"-" url:"primary_keys"` } -// Response to a delete data vector index request. type DeleteDataVectorIndexResponse struct { // Result of the upsert or delete operation. 
Result *DeleteDataResult `json:"result,omitempty"` @@ -124,7 +145,7 @@ type DeleteDataVectorIndexResponse struct { // Delete an endpoint type DeleteEndpointRequest struct { - // Name of the endpoint + // Name of the vector search endpoint EndpointName string `json:"-" url:"-"` } @@ -148,21 +169,18 @@ type DeltaSyncVectorIndexSpecRequest struct { ColumnsToSync []string `json:"columns_to_sync,omitempty"` // The columns that contain the embedding source. EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` - // The columns that contain the embedding vectors. The format should be - // array[double]. + // The columns that contain the embedding vectors. EmbeddingVectorColumns []EmbeddingVectorColumn `json:"embedding_vector_columns,omitempty"` - // [Optional] Automatically sync the vector index contents and computed - // embeddings to the specified Delta table. The only supported table name is - // the index name with the suffix `_writeback_table`. + // [Optional] Name of the Delta table to sync the vector index contents and + // computed embeddings to. EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` - // Pipeline execution mode. - // - // - `TRIGGERED`: If the pipeline uses the triggered execution mode, the - // system stops processing after successfully refreshing the source table in - // the pipeline once, ensuring the table is updated based on the data - // available when the update started. - `CONTINUOUS`: If the pipeline uses - // continuous execution, the pipeline processes new data as it arrives in - // the source table to keep vector index fresh. + // Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the + // triggered execution mode, the system stops processing after successfully + // refreshing the source table in the pipeline once, ensuring the table is + // updated based on the data available when the update started. 
- + // `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline + // processes new data as it arrives in the source table to keep vector index + // fresh. PipelineType PipelineType `json:"pipeline_type,omitempty"` // The name of the source table. SourceTable string `json:"source_table,omitempty"` @@ -188,14 +206,13 @@ type DeltaSyncVectorIndexSpecResponse struct { EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` // The ID of the pipeline that is used to sync the index. PipelineId string `json:"pipeline_id,omitempty"` - // Pipeline execution mode. - // - // - `TRIGGERED`: If the pipeline uses the triggered execution mode, the - // system stops processing after successfully refreshing the source table in - // the pipeline once, ensuring the table is updated based on the data - // available when the update started. - `CONTINUOUS`: If the pipeline uses - // continuous execution, the pipeline processes new data as it arrives in - // the source table to keep vector index fresh. + // Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the + // triggered execution mode, the system stops processing after successfully + // refreshing the source table in the pipeline once, ensuring the table is + // updated based on the data available when the update started. - + // `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline + // processes new data as it arrives in the source table to keep vector index + // fresh. PipelineType PipelineType `json:"pipeline_type,omitempty"` // The name of the source table. SourceTable string `json:"source_table,omitempty"` @@ -212,15 +229,14 @@ func (s DeltaSyncVectorIndexSpecResponse) MarshalJSON() ([]byte, error) { } type DirectAccessVectorIndexSpec struct { - // Contains the optional model endpoint to use during query time. + // The columns that contain the embedding source. The format should be + // array[double]. 
EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` - + // The columns that contain the embedding vectors. The format should be + // array[double]. EmbeddingVectorColumns []EmbeddingVectorColumn `json:"embedding_vector_columns,omitempty"` - // The schema of the index in JSON format. - // - // Supported types are `integer`, `long`, `float`, `double`, `boolean`, - // `string`, `date`, `timestamp`. - // + // The schema of the index in JSON format. Supported types are `integer`, + // `long`, `float`, `double`, `boolean`, `string`, `date`, `timestamp`. // Supported types for vector column: `array`, `array`,`. SchemaJson string `json:"schema_json,omitempty"` @@ -274,9 +290,13 @@ type EndpointInfo struct { CreationTimestamp int64 `json:"creation_timestamp,omitempty"` // Creator of the endpoint Creator string `json:"creator,omitempty"` + // The custom tags assigned to the endpoint + CustomTags []CustomTag `json:"custom_tags,omitempty"` + // The budget policy id applied to the endpoint + EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` // Current status of the endpoint EndpointStatus *EndpointStatus `json:"endpoint_status,omitempty"` - // Type of endpoint. 
+ // Type of endpoint EndpointType EndpointType `json:"endpoint_type,omitempty"` // Unique identifier of the endpoint Id string `json:"id,omitempty"` @@ -284,7 +304,7 @@ type EndpointInfo struct { LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` // User who last updated the endpoint LastUpdatedUser string `json:"last_updated_user,omitempty"` - // Name of endpoint + // Name of the vector search endpoint Name string `json:"name,omitempty"` // Number of indexes on the endpoint NumIndexes int `json:"num_indexes,omitempty"` @@ -438,7 +458,13 @@ func (s ListIndexesRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// copied from proto3 / Google Well Known Types, source: +// https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. type ListValue struct { + // Repeated field of dynamically typed values. Values []Value `json:"values,omitempty"` } @@ -483,13 +509,12 @@ type MiniVectorIndex struct { Creator string `json:"creator,omitempty"` // Name of the endpoint associated with the index EndpointName string `json:"endpoint_name,omitempty"` - // There are 2 types of Vector Search indexes: - // - // - `DELTA_SYNC`: An index that automatically syncs with a source Delta - // Table, automatically and incrementally updating the index as the - // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index - // that supports direct read and write of vectors and metadata through our - // REST and SDK APIs. With this model, the user manages index updates. + // There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that + // automatically syncs with a source Delta Table, automatically and + // incrementally updating the index as the underlying data in the Delta + // Table changes. 
- `DIRECT_ACCESS`: An index that supports direct read and + // write of vectors and metadata through our REST and SDK APIs. With this + // model, the user manages index updates. IndexType VectorIndexType `json:"index_type,omitempty"` // Name of the index Name string `json:"name,omitempty"` @@ -507,14 +532,34 @@ func (s MiniVectorIndex) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Pipeline execution mode. -// -// - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system -// stops processing after successfully refreshing the source table in the -// pipeline once, ensuring the table is updated based on the data available when -// the update started. - `CONTINUOUS`: If the pipeline uses continuous -// execution, the pipeline processes new data as it arrives in the source table -// to keep vector index fresh. +type PatchEndpointBudgetPolicyRequest struct { + // The budget policy id to be applied + BudgetPolicyId string `json:"budget_policy_id"` + // Name of the vector search endpoint + EndpointName string `json:"-" url:"-"` +} + +type PatchEndpointBudgetPolicyResponse struct { + // The budget policy applied to the vector search endpoint. + EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PatchEndpointBudgetPolicyResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PatchEndpointBudgetPolicyResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered +// execution mode, the system stops processing after successfully refreshing the +// source table in the pipeline once, ensuring the table is updated based on the +// data available when the update started. - `CONTINUOUS`: If the pipeline uses +// continuous execution, the pipeline processes new data as it arrives in the +// source table to keep vector index fresh. 
type PipelineType string // If the pipeline uses continuous execution, the pipeline processes new data as @@ -576,10 +621,12 @@ type QueryVectorIndexRequest struct { ColumnsToRerank []string `json:"columns_to_rerank,omitempty"` // JSON string representing query filters. // - // Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": - // 5}`: Filter for id greater than 5. - `{"id <=": 5}`: Filter for id less - // than equal to 5. - `{"id >=": 5}`: Filter for id greater than equal to 5. - // - `{"id": 5}`: Filter for id equal to 5. + // Example filters: + // + // - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for + // id greater than 5. - `{"id <=": 5}`: Filter for id less than equal to 5. + // - `{"id >=": 5}`: Filter for id greater than equal to 5. - `{"id": 5}`: + // Filter for id equal to 5. FiltersJson string `json:"filters_json,omitempty"` // Name of the vector index to query. IndexName string `json:"-" url:"-"` @@ -631,7 +678,7 @@ func (s QueryVectorIndexResponse) MarshalJSON() ([]byte, error) { // Data returned in the query result. type ResultData struct { // Data rows returned in the query. - DataArray [][]string `json:"data_array,omitempty"` + DataArray []ListValue `json:"data_array,omitempty"` // Number of rows in the result set. RowCount int `json:"row_count,omitempty"` @@ -664,7 +711,6 @@ func (s ResultManifest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Request payload for scanning data from a vector index. type ScanVectorIndexRequest struct { // Name of the vector index to scan. 
IndexName string `json:"-" url:"-"` @@ -702,6 +748,15 @@ func (s ScanVectorIndexResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// copied from proto3 / Google Well Known Types, source: +// https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto +// `Struct` represents a structured data value, consisting of fields which map +// to dynamically typed values. In some languages, `Struct` might be supported +// by a native representation. For example, in scripting languages like JS a +// struct is represented as an object. The details of that representation are +// described together with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. type Struct struct { // Data entry, corresponding to a row in a vector index. Fields []MapStringValueEntry `json:"fields,omitempty"` @@ -716,7 +771,30 @@ type SyncIndexRequest struct { type SyncIndexResponse struct { } -// Result of the upsert or delete operation. +type UpdateEndpointCustomTagsRequest struct { + // The new custom tags for the vector search endpoint + CustomTags []CustomTag `json:"custom_tags"` + // Name of the vector search endpoint + EndpointName string `json:"-" url:"-"` +} + +type UpdateEndpointCustomTagsResponse struct { + // All the custom tags that are applied to the vector search endpoint. + CustomTags []CustomTag `json:"custom_tags,omitempty"` + // The name of the vector search endpoint whose custom tags were updated. + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *UpdateEndpointCustomTagsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateEndpointCustomTagsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type UpsertDataResult struct { // List of primary keys for rows that failed to process. 
FailedPrimaryKeys []string `json:"failed_primary_keys,omitempty"` @@ -734,7 +812,6 @@ func (s UpsertDataResult) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Status of the upsert operation. type UpsertDataStatus string const UpsertDataStatusFailure UpsertDataStatus = `FAILURE` @@ -764,7 +841,6 @@ func (f *UpsertDataStatus) Type() string { return "UpsertDataStatus" } -// Request payload for upserting data into a vector index. type UpsertDataVectorIndexRequest struct { // Name of the vector index where data is to be upserted. Must be a Direct // Vector Access Index. @@ -773,7 +849,6 @@ type UpsertDataVectorIndexRequest struct { InputsJson string `json:"inputs_json"` } -// Response to an upsert data vector index request. type UpsertDataVectorIndexResponse struct { // Result of the upsert or delete operation. Result *UpsertDataResult `json:"result,omitempty"` @@ -783,15 +858,26 @@ type UpsertDataVectorIndexResponse struct { type Value struct { BoolValue bool `json:"bool_value,omitempty"` - + // copied from proto3 / Google Well Known Types, source: + // https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + // `ListValue` is a wrapper around a repeated field of values. + // + // The JSON representation for `ListValue` is JSON array. ListValue *ListValue `json:"list_value,omitempty"` - NullValue string `json:"null_value,omitempty"` - NumberValue float64 `json:"number_value,omitempty"` StringValue string `json:"string_value,omitempty"` - + // copied from proto3 / Google Well Known Types, source: + // https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + // `Struct` represents a structured data value, consisting of fields which + // map to dynamically typed values. In some languages, `Struct` might be + // supported by a native representation. 
For example, in scripting languages + // like JS a struct is represented as an object. The details of that + // representation are described together with the proto support for the + // language. + // + // The JSON representation for `Struct` is JSON object. StructValue *Struct `json:"struct_value,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -814,13 +900,12 @@ type VectorIndex struct { DirectAccessIndexSpec *DirectAccessVectorIndexSpec `json:"direct_access_index_spec,omitempty"` // Name of the endpoint associated with the index EndpointName string `json:"endpoint_name,omitempty"` - // There are 2 types of Vector Search indexes: - // - // - `DELTA_SYNC`: An index that automatically syncs with a source Delta - // Table, automatically and incrementally updating the index as the - // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index - // that supports direct read and write of vectors and metadata through our - // REST and SDK APIs. With this model, the user manages index updates. + // There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that + // automatically syncs with a source Delta Table, automatically and + // incrementally updating the index as the underlying data in the Delta + // Table changes. - `DIRECT_ACCESS`: An index that supports direct read and + // write of vectors and metadata through our REST and SDK APIs. With this + // model, the user manages index updates. IndexType VectorIndexType `json:"index_type,omitempty"` // Name of the index Name string `json:"name,omitempty"` @@ -861,13 +946,12 @@ func (s VectorIndexStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// There are 2 types of Vector Search indexes: -// -// - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, -// automatically and incrementally updating the index as the underlying data in -// the Delta Table changes. 
- `DIRECT_ACCESS`: An index that supports direct -// read and write of vectors and metadata through our REST and SDK APIs. With -// this model, the user manages index updates. +// There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that +// automatically syncs with a source Delta Table, automatically and +// incrementally updating the index as the underlying data in the Delta Table +// changes. - `DIRECT_ACCESS`: An index that supports direct read and write of +// vectors and metadata through our REST and SDK APIs. With this model, the user +// manages index updates. type VectorIndexType string // An index that automatically syncs with a source Delta Table, automatically diff --git a/workspace_client.go b/workspace_client.go index 736262051..13cd2134f 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -1075,10 +1075,10 @@ type WorkspaceClient struct { // supports real-time and efficient approximate nearest neighbor (ANN) // search queries. // - // There are 2 types of Vector Search indexes: * **Delta Sync Index**: An + // There are 2 types of Vector Search indexes: - **Delta Sync Index**: An // index that automatically syncs with a source Delta Table, automatically // and incrementally updating the index as the underlying data in the Delta - // Table changes. * **Direct Vector Access Index**: An index that supports + // Table changes. - **Direct Vector Access Index**: An index that supports // direct read and write of vectors and metadata through our REST and SDK // APIs. With this model, the user manages index updates. VectorSearchIndexes vectorsearch.VectorSearchIndexesInterface