From b83a7262d5dce413535faec96770d3276c270e30 Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Mon, 13 Jan 2025 10:58:37 +0100 Subject: [PATCH 01/54] [Release] Release v0.55.0 (#1117) ### Internal Changes * Bump staticcheck to 0.5.1 and add go 1.23 test coverage ([#1106](https://github.com/databricks/databricks-sdk-go/pull/1106)). * Bump x/net, x/crypto dependencies ([#1107](https://github.com/databricks/databricks-sdk-go/pull/1107)). * Create custom codeql.yml ([#1114](https://github.com/databricks/databricks-sdk-go/pull/1114)). * Decouple serving and oauth2 package ([#1110](https://github.com/databricks/databricks-sdk-go/pull/1110)). * Migrate workflows that need write access to use hosted runners ([#1112](https://github.com/databricks/databricks-sdk-go/pull/1112)). * Move package credentials in config ([#1115](https://github.com/databricks/databricks-sdk-go/pull/1115)). * Update Queries test ([#1104](https://github.com/databricks/databricks-sdk-go/pull/1104)). ### API Changes: * Added `NoCompute` field for [apps.CreateAppRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#CreateAppRequest). * Added `HasMore` field for [jobs.BaseJob](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseJob). * Added `HasMore` field for [jobs.BaseRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseRun). * Added `PageToken` field for [jobs.GetJobRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#GetJobRequest). * Added `HasMore` and `NextPageToken` fields for [jobs.Job](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Job). * Added `HasMore` field for [jobs.Run](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Run). * Added `RunAs` field for [pipelines.CreatePipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#CreatePipeline). 
* Added `RunAs` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline). * Added `AuthorizationDetails` and `EndpointUrl` fields for [serving.DataPlaneInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#DataPlaneInfo). * [Breaking] Changed `Update` method for [a.AccountFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#AccountFederationPolicyAPI) account-level service with new required argument order. * [Breaking] Changed `Update` method for [a.ServicePrincipalFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#ServicePrincipalFederationPolicyAPI) account-level service with new required argument order. * Changed `UpdateMask` field for [oauth2.UpdateAccountFederationPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateAccountFederationPolicyRequest) to no longer be required. * Changed `UpdateMask` field for [oauth2.UpdateServicePrincipalFederationPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateServicePrincipalFederationPolicyRequest) to no longer be required. * [Breaking] Changed `DaysOfWeek` field for [pipelines.RestartWindow](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#RestartWindow) to type [pipelines.DayOfWeekList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#DayOfWeekList). 
OpenAPI SHA: 779817ed8d63031f5ea761fbd25ee84f38feec0d, Date: 2025-01-08 --- .codegen/_openapi_sha | 2 +- CHANGELOG.md | 32 +++++++++++++++ service/oauth2/model.go | 44 +++++++++++++++----- service/pipelines/model.go | 82 +++++++++++++++++++------------------- service/pkg.go | 2 +- version/version.go | 2 +- 6 files changed, 110 insertions(+), 54 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index e43e9607f..dfe78790a 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -1668b0db17e23605f8c9d29fb3b674c01590732d \ No newline at end of file +779817ed8d63031f5ea761fbd25ee84f38feec0d \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index ca019db9a..a9036ffa8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,37 @@ # Version changelog +## [Release] Release v0.55.0 + +### Internal Changes + + * Bump staticcheck to 0.5.1 and add go 1.23 test coverage ([#1106](https://github.com/databricks/databricks-sdk-go/pull/1106)). + * Bump x/net, x/crypto dependencies ([#1107](https://github.com/databricks/databricks-sdk-go/pull/1107)). + * Create custom codeql.yml ([#1114](https://github.com/databricks/databricks-sdk-go/pull/1114)). + * Decouple serving and oauth2 package ([#1110](https://github.com/databricks/databricks-sdk-go/pull/1110)). + * Migrate workflows that need write access to use hosted runners ([#1112](https://github.com/databricks/databricks-sdk-go/pull/1112)). + * Move package credentials in config ([#1115](https://github.com/databricks/databricks-sdk-go/pull/1115)). + * Update Queries test ([#1104](https://github.com/databricks/databricks-sdk-go/pull/1104)). + + +### API Changes: + + * Added `NoCompute` field for [apps.CreateAppRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#CreateAppRequest). + * Added `HasMore` field for [jobs.BaseJob](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseJob). 
+ * Added `HasMore` field for [jobs.BaseRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseRun). + * Added `PageToken` field for [jobs.GetJobRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#GetJobRequest). + * Added `HasMore` and `NextPageToken` fields for [jobs.Job](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Job). + * Added `HasMore` field for [jobs.Run](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Run). + * Added `RunAs` field for [pipelines.CreatePipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#CreatePipeline). + * Added `RunAs` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline). + * Added `AuthorizationDetails` and `EndpointUrl` fields for [serving.DataPlaneInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#DataPlaneInfo). + * [Breaking] Changed `Update` method for [a.AccountFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#AccountFederationPolicyAPI) account-level service with new required argument order. + * [Breaking] Changed `Update` method for [a.ServicePrincipalFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#ServicePrincipalFederationPolicyAPI) account-level service with new required argument order. + * Changed `UpdateMask` field for [oauth2.UpdateAccountFederationPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateAccountFederationPolicyRequest) to no longer be required. + * Changed `UpdateMask` field for [oauth2.UpdateServicePrincipalFederationPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateServicePrincipalFederationPolicyRequest) to no longer be required. 
+ * [Breaking] Changed `DaysOfWeek` field for [pipelines.RestartWindow](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#RestartWindow) to type [pipelines.DayOfWeekList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#DayOfWeekList). + +OpenAPI SHA: 779817ed8d63031f5ea761fbd25ee84f38feec0d, Date: 2025-01-08 + ## [Release] Release v0.54.0 ### API Changes: diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 08de08f93..a448d1690 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -625,11 +625,23 @@ type UpdateAccountFederationPolicyRequest struct { Policy *FederationPolicy `json:"policy,omitempty"` // The identifier for the federation policy. PolicyId string `json:"-" url:"-"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). - UpdateMask string `json:"-" url:"update_mask"` + // The field mask specifies which fields of the policy to update. To specify + // multiple fields in the field mask, use comma as the separator (no space). + // The special value '*' indicates that all fields should be updated (full + // replacement). If unspecified, all fields that are set in the policy + // provided in the update request will overwrite the corresponding fields in + // the existing policy. Example value: 'description,oidc_policy.audiences'. 
+ UpdateMask string `json:"-" url:"update_mask,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateAccountFederationPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateAccountFederationPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type UpdateCustomAppIntegration struct { @@ -660,9 +672,21 @@ type UpdateServicePrincipalFederationPolicyRequest struct { PolicyId string `json:"-" url:"-"` // The service principal id for the federation policy. ServicePrincipalId int64 `json:"-" url:"-"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). - UpdateMask string `json:"-" url:"update_mask"` + // The field mask specifies which fields of the policy to update. To specify + // multiple fields in the field mask, use comma as the separator (no space). + // The special value '*' indicates that all fields should be updated (full + // replacement). If unspecified, all fields that are set in the policy + // provided in the update request will overwrite the corresponding fields in + // the existing policy. Example value: 'description,oidc_policy.audiences'. 
+ UpdateMask string `json:"-" url:"update_mask,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *UpdateServicePrincipalFederationPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateServicePrincipalFederationPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } diff --git a/service/pipelines/model.go b/service/pipelines/model.go index 04f110841..c3df70125 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -140,6 +140,46 @@ func (s DataPlaneId) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Days of week in which the restart is allowed to happen (within a five-hour +// window starting at start_hour). If not specified all days of the week will be +// used. +type DayOfWeek string + +const DayOfWeekFriday DayOfWeek = `FRIDAY` + +const DayOfWeekMonday DayOfWeek = `MONDAY` + +const DayOfWeekSaturday DayOfWeek = `SATURDAY` + +const DayOfWeekSunday DayOfWeek = `SUNDAY` + +const DayOfWeekThursday DayOfWeek = `THURSDAY` + +const DayOfWeekTuesday DayOfWeek = `TUESDAY` + +const DayOfWeekWednesday DayOfWeek = `WEDNESDAY` + +// String representation for [fmt.Print] +func (f *DayOfWeek) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *DayOfWeek) Set(v string) error { + switch v { + case `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY`: + *f = DayOfWeek(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"`, v) + } +} + +// Type always returns DayOfWeek to satisfy [pflag.Value] interface +func (f *DayOfWeek) Type() string { + return "DayOfWeek" +} + // Delete a pipeline type DeletePipelineRequest struct { PipelineId string `json:"-" url:"-"` @@ -1329,7 +1369,7 @@ type RestartWindow struct { // Days of week in which the restart is allowed to happen (within a // 
five-hour window starting at start_hour). If not specified all days of // the week will be used. - DaysOfWeek []RestartWindowDaysOfWeek `json:"days_of_week,omitempty"` + DaysOfWeek []DayOfWeek `json:"days_of_week,omitempty"` // An integer between 0 and 23 denoting the start hour for the restart // window in the 24-hour day. Continuous pipeline restart is triggered only // within a five-hour window starting at this hour. @@ -1350,46 +1390,6 @@ func (s RestartWindow) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Days of week in which the restart is allowed to happen (within a five-hour -// window starting at start_hour). If not specified all days of the week will be -// used. -type RestartWindowDaysOfWeek string - -const RestartWindowDaysOfWeekFriday RestartWindowDaysOfWeek = `FRIDAY` - -const RestartWindowDaysOfWeekMonday RestartWindowDaysOfWeek = `MONDAY` - -const RestartWindowDaysOfWeekSaturday RestartWindowDaysOfWeek = `SATURDAY` - -const RestartWindowDaysOfWeekSunday RestartWindowDaysOfWeek = `SUNDAY` - -const RestartWindowDaysOfWeekThursday RestartWindowDaysOfWeek = `THURSDAY` - -const RestartWindowDaysOfWeekTuesday RestartWindowDaysOfWeek = `TUESDAY` - -const RestartWindowDaysOfWeekWednesday RestartWindowDaysOfWeek = `WEDNESDAY` - -// String representation for [fmt.Print] -func (f *RestartWindowDaysOfWeek) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *RestartWindowDaysOfWeek) Set(v string) error { - switch v { - case `FRIDAY`, `MONDAY`, `SATURDAY`, `SUNDAY`, `THURSDAY`, `TUESDAY`, `WEDNESDAY`: - *f = RestartWindowDaysOfWeek(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "FRIDAY", "MONDAY", "SATURDAY", "SUNDAY", "THURSDAY", "TUESDAY", "WEDNESDAY"`, v) - } -} - -// Type always returns RestartWindowDaysOfWeek to satisfy [pflag.Value] interface -func (f *RestartWindowDaysOfWeek) Type() string { - return "RestartWindowDaysOfWeek" -} - // Write-only 
setting, available only in Create/Update calls. Specifies the user // or service principal that the pipeline runs as. If not specified, the // pipeline runs as the user who created the pipeline. diff --git a/service/pkg.go b/service/pkg.go index b891a9547..0e5a53e7e 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -323,8 +323,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *provisioning.CredentialsAPI = nil _ *catalog.CredentialsAPI = nil + _ *provisioning.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil diff --git a/version/version.go b/version/version.go index bac14e8c6..142d27dac 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.54.0" +const Version = "0.55.0" From 914ab6b7e8e48ca6da6803c10c2d720ba496cd87 Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Mon, 13 Jan 2025 18:40:18 +0100 Subject: [PATCH 02/54] [Internal] Scope the traversing directory in the Recursive list workspace test (#1120) ## What changes are proposed in this pull request? What - The PR modifies the scope of directory traversal in the recursive list test. It limits the search to the ".sdk" directory, where the relevant notebook is created. Why - The test was previously hitting API rate limits because it was scanning too many elements in the current traversing directory. By narrowing the scope to the ".sdk" directory, the test can now run more efficiently without encountering these limits. ## How is this tested? This is itself an integration test. 
--- internal/workspace_test.go | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/internal/workspace_test.go b/internal/workspace_test.go index 8ef97f14a..c2a82025b 100644 --- a/internal/workspace_test.go +++ b/internal/workspace_test.go @@ -16,7 +16,7 @@ import ( func myNotebookPath(t *testing.T, w *databricks.WorkspaceClient) string { ctx := context.Background() - testDir := filepath.Join("/Users", me(t, w).UserName, ".sdk", RandomName("t-")) + testDir := filepath.Join("/Users", me(t, w).UserName, ".sdk", "notebooks", RandomName("t-")) notebook := filepath.Join(testDir, RandomName("n-")) err := w.Workspace.MkdirsByPath(ctx, testDir) @@ -224,7 +224,7 @@ func TestAccWorkspaceRecursiveListNoTranspile(t *testing.T) { workspace.UploadOverwrite()) require.NoError(t, err) - allMyNotebooks, err := w.Workspace.RecursiveList(ctx, filepath.Join("/Users", me(t, w).UserName)) + allMyNotebooks, err := w.Workspace.RecursiveList(ctx, filepath.Join("/Users", me(t, w).UserName, ".sdk")) require.NoError(t, err) assert.True(t, len(allMyNotebooks) >= 1) } From 1045fb9697db505f5fd1ca0ebe4be8b6479df981 Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Sun, 19 Jan 2025 01:54:15 +0100 Subject: [PATCH 03/54] [Internal] Delete examples/mocking module (#1126) ## What changes are proposed in this pull request? This PR deletes the `examples/mocking` directory as it is no longer maintained. This mocker example is built on the assumption that every `service` has `WithImpl()` that is used to set the stubs for every `service`. But this is no longer the case. This mocker is not even able to compile. ## How is this tested? 
N/A --- examples/mocking/.gitignore | 4 --- examples/mocking/README.md | 62 ----------------------------------- examples/mocking/dbfs_test.go | 48 --------------------------- examples/mocking/go.mod | 11 ------- 4 files changed, 125 deletions(-) delete mode 100644 examples/mocking/.gitignore delete mode 100644 examples/mocking/README.md delete mode 100644 examples/mocking/dbfs_test.go delete mode 100644 examples/mocking/go.mod diff --git a/examples/mocking/.gitignore b/examples/mocking/.gitignore deleted file mode 100644 index 8915b6bcf..000000000 --- a/examples/mocking/.gitignore +++ /dev/null @@ -1,4 +0,0 @@ -go.sum -vendor -mocks -mocks/* \ No newline at end of file diff --git a/examples/mocking/README.md b/examples/mocking/README.md deleted file mode 100644 index 1151ff8b2..000000000 --- a/examples/mocking/README.md +++ /dev/null @@ -1,62 +0,0 @@ -# Interoperability with `gomock` - -When developing large applications, you find yourself in need of mocking APIs. For Go, there's [`gomock`](https://github.com/golang/mock) framework for code generating testing mocks. In this small example, we'll show how to use `gomock` with Databricks SDK for Go. - -Please read through [`dbfs_test.go`](dbfs_test.go) test example. - -## Declaring which mocks to generate - -```go -//go:generate go run github.com/golang/mock/mockgen@latest -package=mocks -destination=mocks/dbfs.go github.com/databricks/databricks-sdk-go/service/dbfs DbfsService -``` - -* `go run github.com/golang/mock/mockgen@latest` downloads and executes the latest version of `mockgen` command -* `-package=mocks` instructs to generate mocks in the `mocks` package -* `-destination=mocks/dbfs.go` instructs to create `dbfs.go` file with mock stubs. -* `github.com/databricks/databricks-sdk-go/service/dbfs` tells which Databricks package to look services in. -* `DbfsService` tells which services to generate mocks for. 
- -## Initializing `gomock` - -Every test needs the following preamble: - -```go -ctrl := gomock.NewController(t) -defer ctrl.Finish() -``` - -## Mocking individual methods with `gomock` - -Every actual method call must be mocked for the test to pass: - -```go -mockDbfs := mocks.NewMockDbfsService(ctrl) -mockDbfs.EXPECT().Create(gomock.Any(), gomock.Eq(dbfs.Create{ - Path: "/a/b/c", - Overwrite: true, -})).Return(&dbfs.CreateResponse{ - Handle: 123, -}, nil) -``` - -## Testing idioms with Databricks SDK for Go - -You can stub out the HTTP request flow with `httpclient/fixtures.MappingTransport` and `httpclient/fixtures.SliceTransport`. - -Every service has a public `WithImpl()` method, that you can use to set the stubs for every service that is called in the unit tests. - -```go -w := workspaces.New(&databricks.Config{ - HTTPTransport: fixtures.MappingTransport{ - //... - } -}) -w.Dbfs.WithImpl(mockDbfs) -``` - -## Running this example - -1. Run `go mod tidy` in this folder to create `go.sum` file to pick dependency versions. -2. Run `go mod vendor` to download dependencies into `vendor/` directory. -3. Run `go generate ./...` to create `mocks/` directory. -4. Run `go test ./...` to invoke tests with mocks. 
\ No newline at end of file diff --git a/examples/mocking/dbfs_test.go b/examples/mocking/dbfs_test.go deleted file mode 100644 index 34898c0fb..000000000 --- a/examples/mocking/dbfs_test.go +++ /dev/null @@ -1,48 +0,0 @@ -package mocking - -import ( - "context" - "mocking/mocks" - "strings" - "testing" - - "github.com/golang/mock/gomock" - _ "github.com/golang/mock/mockgen/model" - "github.com/stretchr/testify/assert" - "github.com/stretchr/testify/require" - - "github.com/databricks/databricks-sdk-go" - "github.com/databricks/databricks-sdk-go/service/dbfs" -) - -//go:generate go run github.com/golang/mock/mockgen@latest -package=mocks -destination=mocks/dbfs.go github.com/databricks/databricks-sdk-go/service/dbfs DbfsService - -func TestDbfsHighLevelAPI(t *testing.T) { - ctrl := gomock.NewController(t) - defer ctrl.Finish() - - mockDbfs := mocks.NewMockDbfsService(ctrl) - - ctx := context.Background() - mockDbfs.EXPECT().Create(gomock.Any(), gomock.Eq(dbfs.Create{ - Path: "/a/b/c", - Overwrite: true, - })).Return(&dbfs.CreateResponse{ - Handle: 123, - }, nil) - mockDbfs.EXPECT().AddBlock(gomock.Any(), gomock.Eq(dbfs.AddBlock{ - Handle: 123, - Data: "YWJj", - })) - mockDbfs.EXPECT().Close(gomock.Any(), gomock.Eq(dbfs.Close{ - Handle: 123, - })) - - w, err := databricks.NewWorkspaceClient() - require.NoError(t, err) - - w.Dbfs.WithImpl(mockDbfs) - - err = w.Dbfs.Overwrite(ctx, "/a/b/c", strings.NewReader("abc")) - assert.NoError(t, err) -} diff --git a/examples/mocking/go.mod b/examples/mocking/go.mod deleted file mode 100644 index c2366a257..000000000 --- a/examples/mocking/go.mod +++ /dev/null @@ -1,11 +0,0 @@ -module mocking - -go 1.18 - -require ( - github.com/databricks/databricks-sdk-go v0.0.0 - github.com/golang/mock v1.6.0 - github.com/stretchr/testify v1.8.1 -) - -replace github.com/databricks/databricks-sdk-go v0.0.0 => ../.. 
From e079db96f33d53d6659b222a905da366dbab576b Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Mon, 20 Jan 2025 16:12:59 +0100 Subject: [PATCH 04/54] [Fix] Support Query parameters for all HTTP operations (#1124) ## What changes are proposed in this pull request? Support Query parameters for all HTTP operations. Previously, only GET/HEAD/DELETE were supported. ## How is this tested? * Added Unit Tests * Run Integration Tests --- client/client.go | 3 +- client/client_test.go | 169 ++++++++++++-- httpclient/api_client.go | 10 +- httpclient/api_client_test.go | 18 ++ httpclient/request.go | 51 ++++- httpclient/request_test.go | 41 +++- service/apps/impl.go | 43 ++-- service/billing/impl.go | 36 ++- service/catalog/impl.go | 345 +++++++++++++++++++---------- service/cleanrooms/impl.go | 36 ++- service/compute/impl.go | 189 ++++++++++------ service/dashboards/impl.go | 69 ++++-- service/files/impl.go | 55 +++-- service/iam/impl.go | 168 +++++++++----- service/jobs/impl.go | 69 ++++-- service/marketplace/impl.go | 150 ++++++++----- service/ml/impl.go | 198 +++++++++++------ service/oauth2/impl.go | 76 +++++-- service/pipelines/impl.go | 42 ++-- service/pkg.go | 4 +- service/provisioning/impl.go | 90 +++++--- service/serving/impl.go | 54 +++-- service/settings/impl.go | 219 ++++++++++++------ service/sharing/impl.go | 68 ++++-- service/sql/impl.go | 177 ++++++++++----- service/vectorsearch/impl.go | 42 ++-- service/workspace/ext_utilities.go | 4 +- service/workspace/impl.go | 105 ++++++--- 28 files changed, 1741 insertions(+), 790 deletions(-) diff --git a/client/client.go b/client/client.go index 175a57351..cd4d804a1 100644 --- a/client/client.go +++ b/client/client.go @@ -63,12 +63,13 @@ func (c *DatabricksClient) GetOAuthToken(ctx context.Context, authDetails string // Do sends an HTTP request against path. 
func (c *DatabricksClient) Do(ctx context.Context, method, path string, - headers map[string]string, request, response any, + headers map[string]string, queryParams map[string]any, request, response any, visitors ...func(*http.Request) error) error { opts := []httpclient.DoOption{} for _, v := range visitors { opts = append(opts, httpclient.WithRequestVisitor(v)) } + opts = append(opts, httpclient.WithQueryParameters(queryParams)) opts = append(opts, httpclient.WithRequestHeaders(headers)) opts = append(opts, httpclient.WithRequestData(request)) opts = append(opts, httpclient.WithResponseUnmarshal(response)) diff --git a/client/client_test.go b/client/client_test.go index b701045c4..267635946 100644 --- a/client/client_test.go +++ b/client/client_test.go @@ -43,11 +43,15 @@ func TestSimpleRequestFailsURLError(t *testing.T) { }), }) require.NoError(t, err) - err = c.Do(context.Background(), "GET", "/a/b", map[string]string{ - "e": "f", - }, map[string]string{ - "c": "d", - }, nil) + err = c.Do( + context.Background(), + "GET", + "/a/b", + map[string]string{"e": "f"}, + nil, + map[string]string{"c": "d"}, + nil, + ) require.EqualError(t, err, `Get "https://some/a/b?c=d": nope`) } @@ -66,11 +70,15 @@ func TestSimpleRequestFailsAPIError(t *testing.T) { }), }) require.NoError(t, err) - err = c.Do(context.Background(), "GET", "/a/b", map[string]string{ - "e": "f", - }, map[string]string{ - "c": "d", - }, nil) + err = c.Do( + context.Background(), + "GET", + "/a/b", + map[string]string{"e": "f"}, + nil, + map[string]string{"c": "d"}, + nil, + ) require.EqualError(t, err, "nope") require.ErrorIs(t, err, apierr.ErrInvalidParameterValue) } @@ -115,11 +123,15 @@ func TestETag(t *testing.T) { }), }) require.NoError(t, err) - err = c.Do(context.Background(), "GET", "/a/b", map[string]string{ - "e": "f", - }, map[string]string{ - "c": "d", - }, nil) + err = c.Do( + context.Background(), + "GET", + "/a/b", + map[string]string{"e": "f"}, + nil, + map[string]string{"c": "d"}, + 
nil, + ) details := apierr.GetErrorInfo(err) require.Equal(t, 1, len(details)) errorDetails := details[0] @@ -148,7 +160,52 @@ func TestSimpleRequestSucceeds(t *testing.T) { }) require.NoError(t, err) var resp Dummy - err = c.Do(context.Background(), "POST", "/c", nil, Dummy{1}, &resp) + err = c.Do( + context.Background(), + "POST", + "/c", + nil, + nil, + Dummy{1}, + &resp, + ) + require.NoError(t, err) + require.Equal(t, 2, resp.Foo) +} + +func TestQueryParamsRequestSucceeds(t *testing.T) { + type Dummy struct { + Foo int `json:"foo"` + } + c, err := New(&config.Config{ + Host: "some", + Token: "token", + ConfigFile: "/dev/null", + HTTPTransport: hc(func(r *http.Request) (*http.Response, error) { + if r.URL.RawQuery != "a=b&c=1" { + return nil, fmt.Errorf("unexpected query params: %s", r.URL.RawQuery) + } + return &http.Response{ + StatusCode: 200, + Body: io.NopCloser(strings.NewReader(`{"foo": 2}`)), + Request: r, + }, nil + }), + }) + require.NoError(t, err) + var resp Dummy + err = c.Do( + context.Background(), + "POST", + "/c", + nil, + map[string]any{ + "a": "b", + "c": 1, + }, + Dummy{1}, + &resp, + ) require.NoError(t, err) require.Equal(t, 2, resp.Foo) } @@ -180,7 +237,15 @@ func TestSimpleRequestRetried(t *testing.T) { }) require.NoError(t, err) var resp Dummy - err = c.Do(context.Background(), "PATCH", "/a", nil, Dummy{1}, &resp) + err = c.Do( + context.Background(), + "PATCH", + "/a", + nil, + nil, + Dummy{1}, + &resp, + ) require.NoError(t, err) require.Equal(t, 2, resp.Foo) require.True(t, retried[0], "request was not retried") @@ -203,7 +268,15 @@ func TestSimpleRequestAPIError(t *testing.T) { }), }) require.NoError(t, err) - err = c.Do(context.Background(), "PATCH", "/a", nil, map[string]any{}, nil) + err = c.Do( + context.Background(), + "PATCH", + "/a", + nil, + nil, + map[string]any{}, + nil, + ) var aerr *apierr.APIError require.ErrorAs(t, err, &aerr) require.Equal(t, "NOT_FOUND", aerr.ErrorCode) @@ -223,7 +296,15 @@ func TestHttpTransport(t 
*testing.T) { client, err := New(cfg) require.NoError(t, err) - err = client.Do(context.Background(), "GET", "/a", nil, nil, bytes.Buffer{}) + err = client.Do( + context.Background(), + "GET", + "/a", + nil, + nil, + nil, + bytes.Buffer{}, + ) require.NoError(t, err) require.True(t, calledMock) } @@ -249,9 +330,25 @@ func TestDoRemovesDoubleSlashesFromFilesAPI(t *testing.T) { }), }) require.NoError(t, err) - err = c.Do(context.Background(), "GET", "/api/2.0/fs/files//Volumes/abc/def/ghi", nil, map[string]any{}, nil) + err = c.Do( + context.Background(), + "GET", + "/api/2.0/fs/files//Volumes/abc/def/ghi", + nil, + nil, + map[string]any{}, + nil, + ) require.NoError(t, err) - err = c.Do(context.Background(), "GET", "/api/2.0/anotherservice//test", nil, map[string]any{}, nil) + err = c.Do( + context.Background(), + "GET", + "/api/2.0/anotherservice//test", + nil, + nil, + map[string]any{}, + nil, + ) require.NoError(t, err) } @@ -340,7 +437,15 @@ func captureUserAgent(t *testing.T) string { }) require.NoError(t, err) - err = c.Do(context.Background(), "GET", "/a", nil, nil, nil) + err = c.Do( + context.Background(), + "GET", + "/a", + nil, + nil, + nil, + nil, + ) require.NoError(t, err) return userAgent @@ -450,7 +555,15 @@ func testNonJSONResponseIncludedInError(t *testing.T, statusCode int, status, er }) require.NoError(t, err) var m map[string]string - err = c.Do(context.Background(), "GET", "/a", nil, nil, &m) + err = c.Do( + context.Background(), + "GET", + "/a", + nil, + nil, + nil, + &m, + ) require.EqualError(t, err, errorMessage) } @@ -477,6 +590,14 @@ func TestRetryOn503(t *testing.T) { }), }) require.NoError(t, err) - err = c.Do(context.Background(), "GET", "/a/b", nil, map[string]any{}, nil) + err = c.Do( + context.Background(), + "GET", + "/a/b", + nil, + nil, + map[string]any{}, + nil, + ) require.NoError(t, err) } diff --git a/httpclient/api_client.go b/httpclient/api_client.go index 2130fd3bb..8cfec0a01 100644 --- a/httpclient/api_client.go +++ 
b/httpclient/api_client.go @@ -111,13 +111,21 @@ type DoOption struct { body any contentType string isAuthOption bool + queryParams map[string]any } // Do sends an HTTP request against path. func (c *ApiClient) Do(ctx context.Context, method, path string, opts ...DoOption) error { var authVisitor RequestVisitor + var explicitQueryParams map[string]any visitors := c.config.Visitors[:] for _, o := range opts { + if o.queryParams != nil { + if explicitQueryParams != nil { + return fmt.Errorf("only one set of query params is allowed") + } + explicitQueryParams = o.queryParams + } if o.in == nil { continue } @@ -150,7 +158,7 @@ func (c *ApiClient) Do(ctx context.Context, method, path string, opts ...DoOptio data = o.body contentType = o.contentType } - requestBody, err := makeRequestBody(method, &path, data, contentType) + requestBody, err := makeRequestBody(method, &path, data, contentType, explicitQueryParams) if err != nil { return fmt.Errorf("request marshal: %w", err) } diff --git a/httpclient/api_client_test.go b/httpclient/api_client_test.go index 5e382a565..9c66d2806 100644 --- a/httpclient/api_client_test.go +++ b/httpclient/api_client_test.go @@ -67,6 +67,24 @@ func TestSimpleRequestFailsURLError(t *testing.T) { require.EqualError(t, err, `Get "/a/b?c=d": nope`) } +func TestQueryParameters(t *testing.T) { + c := NewApiClient(ClientConfig{ + RetryTimeout: 1 * time.Millisecond, + Transport: hc(func(r *http.Request) (*http.Response, error) { + require.Equal(t, "POST", r.Method) + require.Equal(t, "/a/b", r.URL.Path) + require.Equal(t, "c=d&e=1", r.URL.RawQuery) + return nil, fmt.Errorf("nope") + }), + }) + err := c.Do(context.Background(), "POST", "/a/b", + WithQueryParameters(map[string]any{ + "c": "d", + "e": 1, + })) + require.EqualError(t, err, `Post "/a/b?c=d&e=1": nope`) +} + func TestSimpleRequestFailsAPIError(t *testing.T) { c := NewApiClient(ClientConfig{ Transport: hc(func(r *http.Request) (*http.Response, error) { diff --git a/httpclient/request.go 
b/httpclient/request.go index b8401fe19..ca76ad2ee 100644 --- a/httpclient/request.go +++ b/httpclient/request.go @@ -73,6 +73,18 @@ func WithRequestData(body any) DoOption { } } +// WithQueryParameters takes a map and sends it as query string for non GET/DELETE/HEAD calls. +// This is ignored for GET/DELETE/HEAD calls, as the query parameters are serialized from the body instead. +// +// Experimental: this method may eventually be split into more granular options. +func WithQueryParameters(queryParams map[string]any) DoOption { + // refactor this, so that we split JSON/query string serialization and make + // separate request visitors internally. + return DoOption{ + queryParams: queryParams, + } +} + // WithUrlEncodedData takes either a struct instance, map, string, bytes, or io.Reader plus // a content type, and sends it either as query string for GET and DELETE calls, or as request body // for POST, PUT, and PATCH calls. The content type is set to "application/x-www-form-urlencoded" @@ -148,24 +160,41 @@ func EncodeMultiSegmentPathParameter(p string) string { return b.String() } -func makeRequestBody(method string, requestURL *string, data interface{}, contentType string) (common.RequestBody, error) { - if data == nil { +// We used to not send any query parameters for non GET/DELETE/HEAD requests. +// Moreover, serialization for query parameters in GET/DELETE/HEAD requests depends on the `url` tag. +// This tag is wrongly generated and fixing it will have an unknown impact on the SDK. +// So: +// * GET/DELETE/HEAD requests are sent with query parameters serialized from data using the `url` tag as before (no change). +// * The rest of the requests are sent with query parameters serialized from explicitQueryParams, which does not use the `url` tag. +// TODO: For SDK-Mod, refactor this and remove the `url` tag completely. 
+func makeRequestBody(method string, requestURL *string, data interface{}, contentType string, explicitQueryParams map[string]any) (common.RequestBody, error) { + if data == nil && len(explicitQueryParams) == 0 { return common.RequestBody{}, nil } - if method == "GET" || method == "DELETE" || method == "HEAD" { - qs, err := makeQueryString(data) - if err != nil { - return common.RequestBody{}, err + if data != nil { + if method == "GET" || method == "DELETE" || method == "HEAD" { + qs, err := makeQueryString(data) + if err != nil { + return common.RequestBody{}, err + } + *requestURL += "?" + qs + return common.NewRequestBody([]byte{}) + } + if contentType == UrlEncodedContentType { + qs, err := makeQueryString(data) + if err != nil { + return common.RequestBody{}, err + } + return common.NewRequestBody(qs) } - *requestURL += "?" + qs - return common.NewRequestBody([]byte{}) } - if contentType == UrlEncodedContentType { - qs, err := makeQueryString(data) + if len(explicitQueryParams) > 0 { + qs, err := makeQueryString(explicitQueryParams) if err != nil { return common.RequestBody{}, err } - return common.NewRequestBody(qs) + *requestURL += "?" 
+ qs + return common.NewRequestBody(data) } return common.NewRequestBody(data) } diff --git a/httpclient/request_test.go b/httpclient/request_test.go index 6b388af53..695875099 100644 --- a/httpclient/request_test.go +++ b/httpclient/request_test.go @@ -20,7 +20,7 @@ func TestMakeRequestBody(t *testing.T) { Scope string `json:"scope" url:"scope"` } requestURL := "/a/b/c" - body, err := makeRequestBody("GET", &requestURL, x{"test"}, "") + body, err := makeRequestBody("GET", &requestURL, x{"test"}, "", nil) require.NoError(t, err) bodyBytes, err := io.ReadAll(body.Reader) require.NoError(t, err) @@ -28,7 +28,7 @@ func TestMakeRequestBody(t *testing.T) { require.Equal(t, 0, len(bodyBytes)) requestURL = "/a/b/c" - body, err = makeRequestBody("POST", &requestURL, x{"test"}, "") + body, err = makeRequestBody("POST", &requestURL, x{"test"}, "", nil) require.NoError(t, err) bodyBytes, err = io.ReadAll(body.Reader) require.NoError(t, err) @@ -37,7 +37,7 @@ func TestMakeRequestBody(t *testing.T) { require.Equal(t, []byte(x1), bodyBytes) requestURL = "/a/b/c" - body, err = makeRequestBody("HEAD", &requestURL, x{"test"}, "") + body, err = makeRequestBody("HEAD", &requestURL, x{"test"}, "", nil) require.NoError(t, err) bodyBytes, err = io.ReadAll(body.Reader) require.NoError(t, err) @@ -47,7 +47,7 @@ func TestMakeRequestBody(t *testing.T) { func TestMakeRequestBodyFromReader(t *testing.T) { requestURL := "/a/b/c" - body, err := makeRequestBody("PUT", &requestURL, strings.NewReader("abc"), "") + body, err := makeRequestBody("PUT", &requestURL, strings.NewReader("abc"), "", nil) require.NoError(t, err) bodyBytes, err := io.ReadAll(body.Reader) require.NoError(t, err) @@ -61,7 +61,7 @@ func TestUrlEncoding(t *testing.T) { GrantType: "grant", } requestURL := "/a/b/c" - body, err := makeRequestBody("POST", &requestURL, data, UrlEncodedContentType) + body, err := makeRequestBody("POST", &requestURL, data, UrlEncodedContentType, nil) require.NoError(t, err) bodyBytes, err := 
io.ReadAll(body.Reader) require.NoError(t, err) @@ -71,7 +71,7 @@ func TestUrlEncoding(t *testing.T) { func TestMakeRequestBodyReaderError(t *testing.T) { requestURL := "/a/b/c" - _, err := makeRequestBody("POST", &requestURL, errReader(false), "") + _, err := makeRequestBody("POST", &requestURL, errReader(false), "", nil) // The request body is only read once the request is sent, so no error // should be returned until then. require.NoError(t, err, "request body reader error should be ignored") @@ -82,7 +82,7 @@ func TestMakeRequestBodyJsonError(t *testing.T) { type x struct { Foo chan string `json:"foo"` } - _, err := makeRequestBody("POST", &requestURL, x{make(chan string)}, "") + _, err := makeRequestBody("POST", &requestURL, x{make(chan string)}, "", nil) require.EqualError(t, err, "request marshal failure: json: unsupported type: chan string") } @@ -97,13 +97,13 @@ func TestMakeRequestBodyQueryFailingEncode(t *testing.T) { type x struct { Foo failingUrlEncode `url:"foo"` } - _, err := makeRequestBody("GET", &requestURL, x{failingUrlEncode("always failing")}, "") + _, err := makeRequestBody("GET", &requestURL, x{failingUrlEncode("always failing")}, "", nil) require.EqualError(t, err, "cannot create query string: always failing") } func TestMakeRequestBodyQueryUnsupported(t *testing.T) { requestURL := "/a/b/c" - _, err := makeRequestBody("GET", &requestURL, true, "") + _, err := makeRequestBody("GET", &requestURL, true, "", nil) require.EqualError(t, err, "unsupported query string data: true") } @@ -141,3 +141,26 @@ func TestEncodeMultiSegmentPathParameter(t *testing.T) { // # and ? should be encoded. assert.Equal(t, "a%23b%3Fc", EncodeMultiSegmentPathParameter("a#b?c")) } + +func TestMakeRequestBodyExplicitQueryParams(t *testing.T) { + type x struct { + Scope string `json:"scope" url:"scope"` + } + requestURL := "/a/b/c" + // For GET, it should be ignored. 
+ body, err := makeRequestBody("GET", &requestURL, x{"test"}, "", map[string]any{"foo": "bar"}) + require.NoError(t, err) + bodyBytes, err := io.ReadAll(body.Reader) + require.NoError(t, err) + require.Equal(t, "/a/b/c?scope=test", requestURL) + require.Equal(t, 0, len(bodyBytes)) + + requestURL = "/a/b/c" + body, err = makeRequestBody("POST", &requestURL, x{"test"}, "", map[string]any{"foo": "bar"}) + require.NoError(t, err) + bodyBytes, err = io.ReadAll(body.Reader) + require.NoError(t, err) + require.Equal(t, "/a/b/c?foo=bar", requestURL) + x1 := `{"scope":"test"}` + require.Equal(t, []byte(x1), bodyBytes) +} diff --git a/service/apps/impl.go b/service/apps/impl.go index 85cd53555..21121c39d 100755 --- a/service/apps/impl.go +++ b/service/apps/impl.go @@ -18,132 +18,147 @@ type appsImpl struct { func (a *appsImpl) Create(ctx context.Context, request CreateAppRequest) (*App, error) { var app App path := "/api/2.0/apps" + queryParams := make(map[string]any) + queryParams["no_compute"] = request.NoCompute headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.App, &app) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.App, &app) return &app, err } func (a *appsImpl) Delete(ctx context.Context, request DeleteAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0/apps/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &app) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &app) return &app, err } func (a *appsImpl) Deploy(ctx context.Context, request CreateAppDeploymentRequest) (*AppDeployment, error) { var appDeployment AppDeployment path := fmt.Sprintf("/api/2.0/apps/%v/deployments", request.AppName) + queryParams 
:= make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.AppDeployment, &appDeployment) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.AppDeployment, &appDeployment) return &appDeployment, err } func (a *appsImpl) Get(ctx context.Context, request GetAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0/apps/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &app) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &app) return &app, err } func (a *appsImpl) GetDeployment(ctx context.Context, request GetAppDeploymentRequest) (*AppDeployment, error) { var appDeployment AppDeployment path := fmt.Sprintf("/api/2.0/apps/%v/deployments/%v", request.AppName, request.DeploymentId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &appDeployment) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &appDeployment) return &appDeployment, err } func (a *appsImpl) GetPermissionLevels(ctx context.Context, request GetAppPermissionLevelsRequest) (*GetAppPermissionLevelsResponse, error) { var getAppPermissionLevelsResponse GetAppPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/apps/%v/permissionLevels", request.AppName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getAppPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getAppPermissionLevelsResponse) return 
&getAppPermissionLevelsResponse, err } func (a *appsImpl) GetPermissions(ctx context.Context, request GetAppPermissionsRequest) (*AppPermissions, error) { var appPermissions AppPermissions path := fmt.Sprintf("/api/2.0/permissions/apps/%v", request.AppName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &appPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &appPermissions) return &appPermissions, err } func (a *appsImpl) List(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error) { var listAppsResponse ListAppsResponse path := "/api/2.0/apps" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAppsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAppsResponse) return &listAppsResponse, err } func (a *appsImpl) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { var listAppDeploymentsResponse ListAppDeploymentsResponse path := fmt.Sprintf("/api/2.0/apps/%v/deployments", request.AppName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAppDeploymentsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAppDeploymentsResponse) return &listAppDeploymentsResponse, err } func (a *appsImpl) SetPermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) { var appPermissions AppPermissions path := fmt.Sprintf("/api/2.0/permissions/apps/%v", request.AppName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &appPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &appPermissions) return &appPermissions, err } func (a *appsImpl) Start(ctx context.Context, request StartAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0/apps/%v/start", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &app) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &app) return &app, err } func (a *appsImpl) Stop(ctx context.Context, request StopAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0/apps/%v/stop", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &app) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &app) return &app, err } func (a *appsImpl) Update(ctx context.Context, request UpdateAppRequest) (*App, error) { var app App path := fmt.Sprintf("/api/2.0/apps/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request.App, &app) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.App, &app) return &app, err } func (a *appsImpl) UpdatePermissions(ctx context.Context, request AppPermissionsRequest) (*AppPermissions, error) { var appPermissions AppPermissions path := fmt.Sprintf("/api/2.0/permissions/apps/%v", request.AppName) + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &appPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &appPermissions) return &appPermissions, err } diff --git a/service/billing/impl.go b/service/billing/impl.go index 373be1aec..85942be54 100755 --- a/service/billing/impl.go +++ b/service/billing/impl.go @@ -18,9 +18,10 @@ type billableUsageImpl struct { func (a *billableUsageImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) { var downloadResponse DownloadResponse path := fmt.Sprintf("/api/2.0/accounts/%v/usage/download", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "text/plain" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &downloadResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &downloadResponse) return &downloadResponse, err } @@ -32,47 +33,52 @@ type budgetsImpl struct { func (a *budgetsImpl) Create(ctx context.Context, request CreateBudgetConfigurationRequest) (*CreateBudgetConfigurationResponse, error) { var createBudgetConfigurationResponse CreateBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1/accounts/%v/budgets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createBudgetConfigurationResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createBudgetConfigurationResponse) return &createBudgetConfigurationResponse, err } func (a *budgetsImpl) Delete(ctx context.Context, request DeleteBudgetConfigurationRequest) error { var 
deleteBudgetConfigurationResponse DeleteBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteBudgetConfigurationResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteBudgetConfigurationResponse) return err } func (a *budgetsImpl) Get(ctx context.Context, request GetBudgetConfigurationRequest) (*GetBudgetConfigurationResponse, error) { var getBudgetConfigurationResponse GetBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getBudgetConfigurationResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getBudgetConfigurationResponse) return &getBudgetConfigurationResponse, err } func (a *budgetsImpl) List(ctx context.Context, request ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { var listBudgetConfigurationsResponse ListBudgetConfigurationsResponse path := fmt.Sprintf("/api/2.1/accounts/%v/budgets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listBudgetConfigurationsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listBudgetConfigurationsResponse) return &listBudgetConfigurationsResponse, err } func (a *budgetsImpl) Update(ctx context.Context, request UpdateBudgetConfigurationRequest) (*UpdateBudgetConfigurationResponse, error) { var 
updateBudgetConfigurationResponse UpdateBudgetConfigurationResponse path := fmt.Sprintf("/api/2.1/accounts/%v/budgets/%v", a.client.ConfiguredAccountID(), request.BudgetId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateBudgetConfigurationResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateBudgetConfigurationResponse) return &updateBudgetConfigurationResponse, err } @@ -84,38 +90,42 @@ type logDeliveryImpl struct { func (a *logDeliveryImpl) Create(ctx context.Context, request WrappedCreateLogDeliveryConfiguration) (*WrappedLogDeliveryConfiguration, error) { var wrappedLogDeliveryConfiguration WrappedLogDeliveryConfiguration path := fmt.Sprintf("/api/2.0/accounts/%v/log-delivery", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &wrappedLogDeliveryConfiguration) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &wrappedLogDeliveryConfiguration) return &wrappedLogDeliveryConfiguration, err } func (a *logDeliveryImpl) Get(ctx context.Context, request GetLogDeliveryRequest) (*WrappedLogDeliveryConfiguration, error) { var wrappedLogDeliveryConfiguration WrappedLogDeliveryConfiguration path := fmt.Sprintf("/api/2.0/accounts/%v/log-delivery/%v", a.client.ConfiguredAccountID(), request.LogDeliveryConfigurationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &wrappedLogDeliveryConfiguration) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, 
&wrappedLogDeliveryConfiguration) return &wrappedLogDeliveryConfiguration, err } func (a *logDeliveryImpl) List(ctx context.Context, request ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { var wrappedLogDeliveryConfigurations WrappedLogDeliveryConfigurations path := fmt.Sprintf("/api/2.0/accounts/%v/log-delivery", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &wrappedLogDeliveryConfigurations) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &wrappedLogDeliveryConfigurations) return &wrappedLogDeliveryConfigurations, err } func (a *logDeliveryImpl) PatchStatus(ctx context.Context, request UpdateLogDeliveryConfigurationStatusRequest) error { var patchStatusResponse PatchStatusResponse path := fmt.Sprintf("/api/2.0/accounts/%v/log-delivery/%v", a.client.ConfiguredAccountID(), request.LogDeliveryConfigurationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchStatusResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchStatusResponse) return err } @@ -127,18 +137,20 @@ type usageDashboardsImpl struct { func (a *usageDashboardsImpl) Create(ctx context.Context, request CreateBillingUsageDashboardRequest) (*CreateBillingUsageDashboardResponse, error) { var createBillingUsageDashboardResponse CreateBillingUsageDashboardResponse path := fmt.Sprintf("/api/2.0/accounts/%v/dashboard", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, 
&createBillingUsageDashboardResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createBillingUsageDashboardResponse) return &createBillingUsageDashboardResponse, err } func (a *usageDashboardsImpl) Get(ctx context.Context, request GetBillingUsageDashboardRequest) (*GetBillingUsageDashboardResponse, error) { var getBillingUsageDashboardResponse GetBillingUsageDashboardResponse path := fmt.Sprintf("/api/2.0/accounts/%v/dashboard", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getBillingUsageDashboardResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getBillingUsageDashboardResponse) return &getBillingUsageDashboardResponse, err } diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 78c34e52d..91e940943 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -18,47 +18,52 @@ type accountMetastoreAssignmentsImpl struct { func (a *accountMetastoreAssignmentsImpl) Create(ctx context.Context, request AccountsCreateMetastoreAssignment) error { var createResponse CreateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) return err } func (a *accountMetastoreAssignmentsImpl) Delete(ctx context.Context, request DeleteAccountMetastoreAssignmentRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/metastores/%v", 
a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountMetastoreAssignmentsImpl) Get(ctx context.Context, request GetAccountMetastoreAssignmentRequest) (*AccountsMetastoreAssignment, error) { var accountsMetastoreAssignment AccountsMetastoreAssignment path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/metastore", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &accountsMetastoreAssignment) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountsMetastoreAssignment) return &accountsMetastoreAssignment, err } func (a *accountMetastoreAssignmentsImpl) List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { var listAccountMetastoreAssignmentsResponse ListAccountMetastoreAssignmentsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/workspaces", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAccountMetastoreAssignmentsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAccountMetastoreAssignmentsResponse) return &listAccountMetastoreAssignmentsResponse, err } func (a *accountMetastoreAssignmentsImpl) Update(ctx context.Context, request AccountsUpdateMetastoreAssignment) error { var updateResponse 
UpdateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/metastores/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } @@ -70,47 +75,52 @@ type accountMetastoresImpl struct { func (a *accountMetastoresImpl) Create(ctx context.Context, request AccountsCreateMetastore) (*AccountsMetastoreInfo, error) { var accountsMetastoreInfo AccountsMetastoreInfo path := fmt.Sprintf("/api/2.0/accounts/%v/metastores", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &accountsMetastoreInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &accountsMetastoreInfo) return &accountsMetastoreInfo, err } func (a *accountMetastoresImpl) Delete(ctx context.Context, request DeleteAccountMetastoreRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountMetastoresImpl) Get(ctx context.Context, request GetAccountMetastoreRequest) (*AccountsMetastoreInfo, error) { var accountsMetastoreInfo AccountsMetastoreInfo path := 
fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &accountsMetastoreInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountsMetastoreInfo) return &accountsMetastoreInfo, err } func (a *accountMetastoresImpl) List(ctx context.Context) (*ListMetastoresResponse, error) { var listMetastoresResponse ListMetastoresResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listMetastoresResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listMetastoresResponse) return &listMetastoresResponse, err } func (a *accountMetastoresImpl) Update(ctx context.Context, request AccountsUpdateMetastore) (*AccountsMetastoreInfo, error) { var accountsMetastoreInfo AccountsMetastoreInfo path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &accountsMetastoreInfo) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &accountsMetastoreInfo) return &accountsMetastoreInfo, err } @@ -122,47 +132,52 @@ type accountStorageCredentialsImpl struct { func (a *accountStorageCredentialsImpl) Create(ctx context.Context, request AccountsCreateStorageCredential) (*AccountsStorageCredentialInfo, error) { var accountsStorageCredentialInfo AccountsStorageCredentialInfo path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/storage-credentials", 
a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &accountsStorageCredentialInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &accountsStorageCredentialInfo) return &accountsStorageCredentialInfo, err } func (a *accountStorageCredentialsImpl) Delete(ctx context.Context, request DeleteAccountStorageCredentialRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountStorageCredentialsImpl) Get(ctx context.Context, request GetAccountStorageCredentialRequest) (*AccountsStorageCredentialInfo, error) { var accountsStorageCredentialInfo AccountsStorageCredentialInfo path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &accountsStorageCredentialInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountsStorageCredentialInfo) return &accountsStorageCredentialInfo, err } func (a *accountStorageCredentialsImpl) List(ctx context.Context, request ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, 
error) { var listAccountStorageCredentialsResponse ListAccountStorageCredentialsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/storage-credentials", a.client.ConfiguredAccountID(), request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAccountStorageCredentialsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAccountStorageCredentialsResponse) return &listAccountStorageCredentialsResponse, err } func (a *accountStorageCredentialsImpl) Update(ctx context.Context, request AccountsUpdateStorageCredential) (*AccountsStorageCredentialInfo, error) { var accountsStorageCredentialInfo AccountsStorageCredentialInfo path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/storage-credentials/%v", a.client.ConfiguredAccountID(), request.MetastoreId, request.StorageCredentialName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &accountsStorageCredentialInfo) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &accountsStorageCredentialInfo) return &accountsStorageCredentialInfo, err } @@ -174,19 +189,21 @@ type artifactAllowlistsImpl struct { func (a *artifactAllowlistsImpl) Get(ctx context.Context, request GetArtifactAllowlistRequest) (*ArtifactAllowlistInfo, error) { var artifactAllowlistInfo ArtifactAllowlistInfo path := fmt.Sprintf("/api/2.1/unity-catalog/artifact-allowlists/%v", request.ArtifactType) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &artifactAllowlistInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, 
request, &artifactAllowlistInfo) return &artifactAllowlistInfo, err } func (a *artifactAllowlistsImpl) Update(ctx context.Context, request SetArtifactAllowlist) (*ArtifactAllowlistInfo, error) { var artifactAllowlistInfo ArtifactAllowlistInfo path := fmt.Sprintf("/api/2.1/unity-catalog/artifact-allowlists/%v", request.ArtifactType) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &artifactAllowlistInfo) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &artifactAllowlistInfo) return &artifactAllowlistInfo, err } @@ -198,47 +215,52 @@ type catalogsImpl struct { func (a *catalogsImpl) Create(ctx context.Context, request CreateCatalog) (*CatalogInfo, error) { var catalogInfo CatalogInfo path := "/api/2.1/unity-catalog/catalogs" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &catalogInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &catalogInfo) return &catalogInfo, err } func (a *catalogsImpl) Delete(ctx context.Context, request DeleteCatalogRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/catalogs/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *catalogsImpl) Get(ctx context.Context, request GetCatalogRequest) (*CatalogInfo, error) { var catalogInfo CatalogInfo path := fmt.Sprintf("/api/2.1/unity-catalog/catalogs/%v", 
request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &catalogInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &catalogInfo) return &catalogInfo, err } func (a *catalogsImpl) List(ctx context.Context, request ListCatalogsRequest) (*ListCatalogsResponse, error) { var listCatalogsResponse ListCatalogsResponse path := "/api/2.1/unity-catalog/catalogs" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listCatalogsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCatalogsResponse) return &listCatalogsResponse, err } func (a *catalogsImpl) Update(ctx context.Context, request UpdateCatalog) (*CatalogInfo, error) { var catalogInfo CatalogInfo path := fmt.Sprintf("/api/2.1/unity-catalog/catalogs/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &catalogInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &catalogInfo) return &catalogInfo, err } @@ -250,47 +272,52 @@ type connectionsImpl struct { func (a *connectionsImpl) Create(ctx context.Context, request CreateConnection) (*ConnectionInfo, error) { var connectionInfo ConnectionInfo path := "/api/2.1/unity-catalog/connections" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &connectionInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &connectionInfo) 
return &connectionInfo, err } func (a *connectionsImpl) Delete(ctx context.Context, request DeleteConnectionRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/connections/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *connectionsImpl) Get(ctx context.Context, request GetConnectionRequest) (*ConnectionInfo, error) { var connectionInfo ConnectionInfo path := fmt.Sprintf("/api/2.1/unity-catalog/connections/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &connectionInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &connectionInfo) return &connectionInfo, err } func (a *connectionsImpl) List(ctx context.Context, request ListConnectionsRequest) (*ListConnectionsResponse, error) { var listConnectionsResponse ListConnectionsResponse path := "/api/2.1/unity-catalog/connections" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listConnectionsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listConnectionsResponse) return &listConnectionsResponse, err } func (a *connectionsImpl) Update(ctx context.Context, request UpdateConnection) (*ConnectionInfo, error) { var connectionInfo ConnectionInfo path := fmt.Sprintf("/api/2.1/unity-catalog/connections/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] 
= "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &connectionInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &connectionInfo) return &connectionInfo, err } @@ -302,67 +329,74 @@ type credentialsImpl struct { func (a *credentialsImpl) CreateCredential(ctx context.Context, request CreateCredentialRequest) (*CredentialInfo, error) { var credentialInfo CredentialInfo path := "/api/2.1/unity-catalog/credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &credentialInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &credentialInfo) return &credentialInfo, err } func (a *credentialsImpl) DeleteCredential(ctx context.Context, request DeleteCredentialRequest) error { var deleteCredentialResponse DeleteCredentialResponse path := fmt.Sprintf("/api/2.1/unity-catalog/credentials/%v", request.NameArg) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteCredentialResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCredentialResponse) return err } func (a *credentialsImpl) GenerateTemporaryServiceCredential(ctx context.Context, request GenerateTemporaryServiceCredentialRequest) (*TemporaryCredentials, error) { var temporaryCredentials TemporaryCredentials path := "/api/2.1/unity-catalog/temporary-service-credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &temporaryCredentials) + err := a.client.Do(ctx, http.MethodPost, path, 
headers, queryParams, request, &temporaryCredentials) return &temporaryCredentials, err } func (a *credentialsImpl) GetCredential(ctx context.Context, request GetCredentialRequest) (*CredentialInfo, error) { var credentialInfo CredentialInfo path := fmt.Sprintf("/api/2.1/unity-catalog/credentials/%v", request.NameArg) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &credentialInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &credentialInfo) return &credentialInfo, err } func (a *credentialsImpl) ListCredentials(ctx context.Context, request ListCredentialsRequest) (*ListCredentialsResponse, error) { var listCredentialsResponse ListCredentialsResponse path := "/api/2.1/unity-catalog/credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listCredentialsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCredentialsResponse) return &listCredentialsResponse, err } func (a *credentialsImpl) UpdateCredential(ctx context.Context, request UpdateCredentialRequest) (*CredentialInfo, error) { var credentialInfo CredentialInfo path := fmt.Sprintf("/api/2.1/unity-catalog/credentials/%v", request.NameArg) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &credentialInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &credentialInfo) return &credentialInfo, err } func (a *credentialsImpl) ValidateCredential(ctx context.Context, request ValidateCredentialRequest) (*ValidateCredentialResponse, error) { var validateCredentialResponse 
ValidateCredentialResponse path := "/api/2.1/unity-catalog/validate-credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &validateCredentialResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &validateCredentialResponse) return &validateCredentialResponse, err } @@ -374,47 +408,52 @@ type externalLocationsImpl struct { func (a *externalLocationsImpl) Create(ctx context.Context, request CreateExternalLocation) (*ExternalLocationInfo, error) { var externalLocationInfo ExternalLocationInfo path := "/api/2.1/unity-catalog/external-locations" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &externalLocationInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &externalLocationInfo) return &externalLocationInfo, err } func (a *externalLocationsImpl) Delete(ctx context.Context, request DeleteExternalLocationRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/external-locations/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *externalLocationsImpl) Get(ctx context.Context, request GetExternalLocationRequest) (*ExternalLocationInfo, error) { var externalLocationInfo ExternalLocationInfo path := fmt.Sprintf("/api/2.1/unity-catalog/external-locations/%v", request.Name) + queryParams := make(map[string]any) headers := 
make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &externalLocationInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &externalLocationInfo) return &externalLocationInfo, err } func (a *externalLocationsImpl) List(ctx context.Context, request ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { var listExternalLocationsResponse ListExternalLocationsResponse path := "/api/2.1/unity-catalog/external-locations" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listExternalLocationsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExternalLocationsResponse) return &listExternalLocationsResponse, err } func (a *externalLocationsImpl) Update(ctx context.Context, request UpdateExternalLocation) (*ExternalLocationInfo, error) { var externalLocationInfo ExternalLocationInfo path := fmt.Sprintf("/api/2.1/unity-catalog/external-locations/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &externalLocationInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &externalLocationInfo) return &externalLocationInfo, err } @@ -426,47 +465,52 @@ type functionsImpl struct { func (a *functionsImpl) Create(ctx context.Context, request CreateFunctionRequest) (*FunctionInfo, error) { var functionInfo FunctionInfo path := "/api/2.1/unity-catalog/functions" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, 
request, &functionInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &functionInfo) return &functionInfo, err } func (a *functionsImpl) Delete(ctx context.Context, request DeleteFunctionRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/functions/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *functionsImpl) Get(ctx context.Context, request GetFunctionRequest) (*FunctionInfo, error) { var functionInfo FunctionInfo path := fmt.Sprintf("/api/2.1/unity-catalog/functions/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &functionInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &functionInfo) return &functionInfo, err } func (a *functionsImpl) List(ctx context.Context, request ListFunctionsRequest) (*ListFunctionsResponse, error) { var listFunctionsResponse ListFunctionsResponse path := "/api/2.1/unity-catalog/functions" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listFunctionsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFunctionsResponse) return &listFunctionsResponse, err } func (a *functionsImpl) Update(ctx context.Context, request UpdateFunction) (*FunctionInfo, error) { var functionInfo FunctionInfo path := fmt.Sprintf("/api/2.1/unity-catalog/functions/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) 
headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &functionInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &functionInfo) return &functionInfo, err } @@ -478,28 +522,31 @@ type grantsImpl struct { func (a *grantsImpl) Get(ctx context.Context, request GetGrantRequest) (*PermissionsList, error) { var permissionsList PermissionsList path := fmt.Sprintf("/api/2.1/unity-catalog/permissions/%v/%v", request.SecurableType, request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &permissionsList) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &permissionsList) return &permissionsList, err } func (a *grantsImpl) GetEffective(ctx context.Context, request GetEffectiveRequest) (*EffectivePermissionsList, error) { var effectivePermissionsList EffectivePermissionsList path := fmt.Sprintf("/api/2.1/unity-catalog/effective-permissions/%v/%v", request.SecurableType, request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &effectivePermissionsList) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &effectivePermissionsList) return &effectivePermissionsList, err } func (a *grantsImpl) Update(ctx context.Context, request UpdatePermissions) (*PermissionsList, error) { var permissionsList PermissionsList path := fmt.Sprintf("/api/2.1/unity-catalog/permissions/%v/%v", request.SecurableType, request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, 
headers, request, &permissionsList) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &permissionsList) return &permissionsList, err } @@ -511,94 +558,104 @@ type metastoresImpl struct { func (a *metastoresImpl) Assign(ctx context.Context, request CreateMetastoreAssignment) error { var assignResponse AssignResponse path := fmt.Sprintf("/api/2.1/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &assignResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &assignResponse) return err } func (a *metastoresImpl) Create(ctx context.Context, request CreateMetastore) (*MetastoreInfo, error) { var metastoreInfo MetastoreInfo path := "/api/2.1/unity-catalog/metastores" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &metastoreInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &metastoreInfo) return &metastoreInfo, err } func (a *metastoresImpl) Current(ctx context.Context) (*MetastoreAssignment, error) { var metastoreAssignment MetastoreAssignment path := "/api/2.1/unity-catalog/current-metastore-assignment" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &metastoreAssignment) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &metastoreAssignment) return &metastoreAssignment, err } func (a *metastoresImpl) Delete(ctx context.Context, request DeleteMetastoreRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/metastores/%v", 
request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *metastoresImpl) Get(ctx context.Context, request GetMetastoreRequest) (*MetastoreInfo, error) { var metastoreInfo MetastoreInfo path := fmt.Sprintf("/api/2.1/unity-catalog/metastores/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &metastoreInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &metastoreInfo) return &metastoreInfo, err } func (a *metastoresImpl) List(ctx context.Context) (*ListMetastoresResponse, error) { var listMetastoresResponse ListMetastoresResponse path := "/api/2.1/unity-catalog/metastores" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listMetastoresResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listMetastoresResponse) return &listMetastoresResponse, err } func (a *metastoresImpl) Summary(ctx context.Context) (*GetMetastoreSummaryResponse, error) { var getMetastoreSummaryResponse GetMetastoreSummaryResponse path := "/api/2.1/unity-catalog/metastore_summary" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &getMetastoreSummaryResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getMetastoreSummaryResponse) return &getMetastoreSummaryResponse, err } func (a *metastoresImpl) Unassign(ctx context.Context, request UnassignRequest) error { var unassignResponse UnassignResponse path := 
fmt.Sprintf("/api/2.1/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &unassignResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &unassignResponse) return err } func (a *metastoresImpl) Update(ctx context.Context, request UpdateMetastore) (*MetastoreInfo, error) { var metastoreInfo MetastoreInfo path := fmt.Sprintf("/api/2.1/unity-catalog/metastores/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &metastoreInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &metastoreInfo) return &metastoreInfo, err } func (a *metastoresImpl) UpdateAssignment(ctx context.Context, request UpdateMetastoreAssignment) error { var updateAssignmentResponse UpdateAssignmentResponse path := fmt.Sprintf("/api/2.1/unity-catalog/workspaces/%v/metastore", request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateAssignmentResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateAssignmentResponse) return err } @@ -610,45 +667,50 @@ type modelVersionsImpl struct { func (a *modelVersionsImpl) Delete(ctx context.Context, request DeleteModelVersionRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, 
http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *modelVersionsImpl) Get(ctx context.Context, request GetModelVersionRequest) (*ModelVersionInfo, error) { var modelVersionInfo ModelVersionInfo path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &modelVersionInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &modelVersionInfo) return &modelVersionInfo, err } func (a *modelVersionsImpl) GetByAlias(ctx context.Context, request GetByAliasRequest) (*ModelVersionInfo, error) { var modelVersionInfo ModelVersionInfo path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &modelVersionInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &modelVersionInfo) return &modelVersionInfo, err } func (a *modelVersionsImpl) List(ctx context.Context, request ListModelVersionsRequest) (*ListModelVersionsResponse, error) { var listModelVersionsResponse ListModelVersionsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/versions", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listModelVersionsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listModelVersionsResponse) return &listModelVersionsResponse, err } func (a *modelVersionsImpl) Update(ctx 
context.Context, request UpdateModelVersionRequest) (*ModelVersionInfo, error) { var modelVersionInfo ModelVersionInfo path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/versions/%v", request.FullName, request.Version) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &modelVersionInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &modelVersionInfo) return &modelVersionInfo, err } @@ -660,28 +722,31 @@ type onlineTablesImpl struct { func (a *onlineTablesImpl) Create(ctx context.Context, request CreateOnlineTableRequest) (*OnlineTable, error) { var onlineTable OnlineTable path := "/api/2.0/online-tables" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.Table, &onlineTable) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Table, &onlineTable) return &onlineTable, err } func (a *onlineTablesImpl) Delete(ctx context.Context, request DeleteOnlineTableRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/online-tables/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *onlineTablesImpl) Get(ctx context.Context, request GetOnlineTableRequest) (*OnlineTable, error) { var onlineTable OnlineTable path := fmt.Sprintf("/api/2.0/online-tables/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &onlineTable) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &onlineTable) return &onlineTable, err } @@ -693,82 +758,91 @@ type qualityMonitorsImpl struct { func (a *qualityMonitorsImpl) CancelRefresh(ctx context.Context, request CancelRefreshRequest) error { var cancelRefreshResponse CancelRefreshResponse path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor/refreshes/%v/cancel", request.TableName, request.RefreshId) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &cancelRefreshResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &cancelRefreshResponse) return err } func (a *qualityMonitorsImpl) Create(ctx context.Context, request CreateMonitor) (*MonitorInfo, error) { var monitorInfo MonitorInfo path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &monitorInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &monitorInfo) return &monitorInfo, err } func (a *qualityMonitorsImpl) Delete(ctx context.Context, request DeleteQualityMonitorRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *qualityMonitorsImpl) Get(ctx context.Context, request GetQualityMonitorRequest) (*MonitorInfo, error) { 
var monitorInfo MonitorInfo path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &monitorInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &monitorInfo) return &monitorInfo, err } func (a *qualityMonitorsImpl) GetRefresh(ctx context.Context, request GetRefreshRequest) (*MonitorRefreshInfo, error) { var monitorRefreshInfo MonitorRefreshInfo path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor/refreshes/%v", request.TableName, request.RefreshId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &monitorRefreshInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &monitorRefreshInfo) return &monitorRefreshInfo, err } func (a *qualityMonitorsImpl) ListRefreshes(ctx context.Context, request ListRefreshesRequest) (*MonitorRefreshListResponse, error) { var monitorRefreshListResponse MonitorRefreshListResponse path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor/refreshes", request.TableName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &monitorRefreshListResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &monitorRefreshListResponse) return &monitorRefreshListResponse, err } func (a *qualityMonitorsImpl) RegenerateDashboard(ctx context.Context, request RegenerateDashboardRequest) (*RegenerateDashboardResponse, error) { var regenerateDashboardResponse RegenerateDashboardResponse path := fmt.Sprintf("/api/2.1/quality-monitoring/tables/%v/monitor/dashboard", request.TableName) + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &regenerateDashboardResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &regenerateDashboardResponse) return &regenerateDashboardResponse, err } func (a *qualityMonitorsImpl) RunRefresh(ctx context.Context, request RunRefreshRequest) (*MonitorRefreshInfo, error) { var monitorRefreshInfo MonitorRefreshInfo path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor/refreshes", request.TableName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &monitorRefreshInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &monitorRefreshInfo) return &monitorRefreshInfo, err } func (a *qualityMonitorsImpl) Update(ctx context.Context, request UpdateMonitor) (*MonitorInfo, error) { var monitorInfo MonitorInfo path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/monitor", request.TableName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &monitorInfo) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &monitorInfo) return &monitorInfo, err } @@ -780,64 +854,71 @@ type registeredModelsImpl struct { func (a *registeredModelsImpl) Create(ctx context.Context, request CreateRegisteredModelRequest) (*RegisteredModelInfo, error) { var registeredModelInfo RegisteredModelInfo path := "/api/2.1/unity-catalog/models" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, 
http.MethodPost, path, headers, request, &registeredModelInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &registeredModelInfo) return &registeredModelInfo, err } func (a *registeredModelsImpl) Delete(ctx context.Context, request DeleteRegisteredModelRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *registeredModelsImpl) DeleteAlias(ctx context.Context, request DeleteAliasRequest) error { var deleteAliasResponse DeleteAliasResponse path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteAliasResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAliasResponse) return err } func (a *registeredModelsImpl) Get(ctx context.Context, request GetRegisteredModelRequest) (*RegisteredModelInfo, error) { var registeredModelInfo RegisteredModelInfo path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &registeredModelInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &registeredModelInfo) return &registeredModelInfo, err } func (a *registeredModelsImpl) List(ctx context.Context, request ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { var listRegisteredModelsResponse ListRegisteredModelsResponse path := 
"/api/2.1/unity-catalog/models" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listRegisteredModelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRegisteredModelsResponse) return &listRegisteredModelsResponse, err } func (a *registeredModelsImpl) SetAlias(ctx context.Context, request SetRegisteredModelAliasRequest) (*RegisteredModelAlias, error) { var registeredModelAlias RegisteredModelAlias path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/aliases/%v", request.FullName, request.Alias) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &registeredModelAlias) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &registeredModelAlias) return &registeredModelAlias, err } func (a *registeredModelsImpl) Update(ctx context.Context, request UpdateRegisteredModelRequest) (*RegisteredModelInfo, error) { var registeredModelInfo RegisteredModelInfo path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &registeredModelInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &registeredModelInfo) return &registeredModelInfo, err } @@ -849,18 +930,20 @@ type resourceQuotasImpl struct { func (a *resourceQuotasImpl) GetQuota(ctx context.Context, request GetQuotaRequest) (*GetQuotaResponse, error) { var getQuotaResponse GetQuotaResponse path := fmt.Sprintf("/api/2.1/unity-catalog/resource-quotas/%v/%v/%v", request.ParentSecurableType, 
request.ParentFullName, request.QuotaName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getQuotaResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getQuotaResponse) return &getQuotaResponse, err } func (a *resourceQuotasImpl) ListQuotas(ctx context.Context, request ListQuotasRequest) (*ListQuotasResponse, error) { var listQuotasResponse ListQuotasResponse path := "/api/2.1/unity-catalog/resource-quotas/all-resource-quotas" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listQuotasResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQuotasResponse) return &listQuotasResponse, err } @@ -872,47 +955,52 @@ type schemasImpl struct { func (a *schemasImpl) Create(ctx context.Context, request CreateSchema) (*SchemaInfo, error) { var schemaInfo SchemaInfo path := "/api/2.1/unity-catalog/schemas" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &schemaInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &schemaInfo) return &schemaInfo, err } func (a *schemasImpl) Delete(ctx context.Context, request DeleteSchemaRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/schemas/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return 
err } func (a *schemasImpl) Get(ctx context.Context, request GetSchemaRequest) (*SchemaInfo, error) { var schemaInfo SchemaInfo path := fmt.Sprintf("/api/2.1/unity-catalog/schemas/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &schemaInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &schemaInfo) return &schemaInfo, err } func (a *schemasImpl) List(ctx context.Context, request ListSchemasRequest) (*ListSchemasResponse, error) { var listSchemasResponse ListSchemasResponse path := "/api/2.1/unity-catalog/schemas" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listSchemasResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSchemasResponse) return &listSchemasResponse, err } func (a *schemasImpl) Update(ctx context.Context, request UpdateSchema) (*SchemaInfo, error) { var schemaInfo SchemaInfo path := fmt.Sprintf("/api/2.1/unity-catalog/schemas/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &schemaInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &schemaInfo) return &schemaInfo, err } @@ -924,57 +1012,63 @@ type storageCredentialsImpl struct { func (a *storageCredentialsImpl) Create(ctx context.Context, request CreateStorageCredential) (*StorageCredentialInfo, error) { var storageCredentialInfo StorageCredentialInfo path := "/api/2.1/unity-catalog/storage-credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" 
headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &storageCredentialInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &storageCredentialInfo) return &storageCredentialInfo, err } func (a *storageCredentialsImpl) Delete(ctx context.Context, request DeleteStorageCredentialRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/storage-credentials/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *storageCredentialsImpl) Get(ctx context.Context, request GetStorageCredentialRequest) (*StorageCredentialInfo, error) { var storageCredentialInfo StorageCredentialInfo path := fmt.Sprintf("/api/2.1/unity-catalog/storage-credentials/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &storageCredentialInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &storageCredentialInfo) return &storageCredentialInfo, err } func (a *storageCredentialsImpl) List(ctx context.Context, request ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { var listStorageCredentialsResponse ListStorageCredentialsResponse path := "/api/2.1/unity-catalog/storage-credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listStorageCredentialsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listStorageCredentialsResponse) 
return &listStorageCredentialsResponse, err } func (a *storageCredentialsImpl) Update(ctx context.Context, request UpdateStorageCredential) (*StorageCredentialInfo, error) { var storageCredentialInfo StorageCredentialInfo path := fmt.Sprintf("/api/2.1/unity-catalog/storage-credentials/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &storageCredentialInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &storageCredentialInfo) return &storageCredentialInfo, err } func (a *storageCredentialsImpl) Validate(ctx context.Context, request ValidateStorageCredential) (*ValidateStorageCredentialResponse, error) { var validateStorageCredentialResponse ValidateStorageCredentialResponse path := "/api/2.1/unity-catalog/validate-storage-credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &validateStorageCredentialResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &validateStorageCredentialResponse) return &validateStorageCredentialResponse, err } @@ -986,27 +1080,30 @@ type systemSchemasImpl struct { func (a *systemSchemasImpl) Disable(ctx context.Context, request DisableRequest) error { var disableResponse DisableResponse path := fmt.Sprintf("/api/2.1/unity-catalog/metastores/%v/systemschemas/%v", request.MetastoreId, request.SchemaName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &disableResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &disableResponse) 
return err } func (a *systemSchemasImpl) Enable(ctx context.Context, request EnableRequest) error { var enableResponse EnableResponse path := fmt.Sprintf("/api/2.1/unity-catalog/metastores/%v/systemschemas/%v", request.MetastoreId, request.SchemaName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, nil, &enableResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, nil, &enableResponse) return err } func (a *systemSchemasImpl) List(ctx context.Context, request ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { var listSystemSchemasResponse ListSystemSchemasResponse path := fmt.Sprintf("/api/2.1/unity-catalog/metastores/%v/systemschemas", request.MetastoreId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listSystemSchemasResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSystemSchemasResponse) return &listSystemSchemasResponse, err } @@ -1018,19 +1115,21 @@ type tableConstraintsImpl struct { func (a *tableConstraintsImpl) Create(ctx context.Context, request CreateTableConstraint) (*TableConstraint, error) { var tableConstraint TableConstraint path := "/api/2.1/unity-catalog/constraints" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &tableConstraint) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &tableConstraint) return &tableConstraint, err } func (a *tableConstraintsImpl) Delete(ctx context.Context, request DeleteTableConstraintRequest) error { var deleteResponse DeleteResponse path := 
fmt.Sprintf("/api/2.1/unity-catalog/constraints/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } @@ -1042,55 +1141,61 @@ type tablesImpl struct { func (a *tablesImpl) Delete(ctx context.Context, request DeleteTableRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *tablesImpl) Exists(ctx context.Context, request ExistsRequest) (*TableExistsResponse, error) { var tableExistsResponse TableExistsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v/exists", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &tableExistsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &tableExistsResponse) return &tableExistsResponse, err } func (a *tablesImpl) Get(ctx context.Context, request GetTableRequest) (*TableInfo, error) { var tableInfo TableInfo path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &tableInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &tableInfo) return &tableInfo, err } func 
(a *tablesImpl) List(ctx context.Context, request ListTablesRequest) (*ListTablesResponse, error) { var listTablesResponse ListTablesResponse path := "/api/2.1/unity-catalog/tables" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listTablesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTablesResponse) return &listTablesResponse, err } func (a *tablesImpl) ListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error) { var listTableSummariesResponse ListTableSummariesResponse path := "/api/2.1/unity-catalog/table-summaries" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listTableSummariesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTableSummariesResponse) return &listTableSummariesResponse, err } func (a *tablesImpl) Update(ctx context.Context, request UpdateTableRequest) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.1/unity-catalog/tables/%v", request.FullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err } @@ -1102,10 +1207,11 @@ type temporaryTableCredentialsImpl struct { func (a *temporaryTableCredentialsImpl) GenerateTemporaryTableCredentials(ctx context.Context, request GenerateTemporaryTableCredentialRequest) (*GenerateTemporaryTableCredentialResponse, error) { var generateTemporaryTableCredentialResponse 
GenerateTemporaryTableCredentialResponse path := "/api/2.0/unity-catalog/temporary-table-credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &generateTemporaryTableCredentialResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &generateTemporaryTableCredentialResponse) return &generateTemporaryTableCredentialResponse, err } @@ -1117,46 +1223,51 @@ type volumesImpl struct { func (a *volumesImpl) Create(ctx context.Context, request CreateVolumeRequestContent) (*VolumeInfo, error) { var volumeInfo VolumeInfo path := "/api/2.1/unity-catalog/volumes" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &volumeInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &volumeInfo) return &volumeInfo, err } func (a *volumesImpl) Delete(ctx context.Context, request DeleteVolumeRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/volumes/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *volumesImpl) List(ctx context.Context, request ListVolumesRequest) (*ListVolumesResponseContent, error) { var listVolumesResponseContent ListVolumesResponseContent path := "/api/2.1/unity-catalog/volumes" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, 
&listVolumesResponseContent) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listVolumesResponseContent) return &listVolumesResponseContent, err } func (a *volumesImpl) Read(ctx context.Context, request ReadVolumeRequest) (*VolumeInfo, error) { var volumeInfo VolumeInfo path := fmt.Sprintf("/api/2.1/unity-catalog/volumes/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &volumeInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &volumeInfo) return &volumeInfo, err } func (a *volumesImpl) Update(ctx context.Context, request UpdateVolumeRequestContent) (*VolumeInfo, error) { var volumeInfo VolumeInfo path := fmt.Sprintf("/api/2.1/unity-catalog/volumes/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &volumeInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &volumeInfo) return &volumeInfo, err } @@ -1168,37 +1279,41 @@ type workspaceBindingsImpl struct { func (a *workspaceBindingsImpl) Get(ctx context.Context, request GetWorkspaceBindingRequest) (*CurrentWorkspaceBindings, error) { var currentWorkspaceBindings CurrentWorkspaceBindings path := fmt.Sprintf("/api/2.1/unity-catalog/workspace-bindings/catalogs/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &currentWorkspaceBindings) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &currentWorkspaceBindings) return &currentWorkspaceBindings, err } func (a *workspaceBindingsImpl) GetBindings(ctx context.Context, request 
GetBindingsRequest) (*WorkspaceBindingsResponse, error) { var workspaceBindingsResponse WorkspaceBindingsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &workspaceBindingsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspaceBindingsResponse) return &workspaceBindingsResponse, err } func (a *workspaceBindingsImpl) Update(ctx context.Context, request UpdateWorkspaceBindings) (*CurrentWorkspaceBindings, error) { var currentWorkspaceBindings CurrentWorkspaceBindings path := fmt.Sprintf("/api/2.1/unity-catalog/workspace-bindings/catalogs/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &currentWorkspaceBindings) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &currentWorkspaceBindings) return &currentWorkspaceBindings, err } func (a *workspaceBindingsImpl) UpdateBindings(ctx context.Context, request UpdateWorkspaceBindingsParameters) (*WorkspaceBindingsResponse, error) { var workspaceBindingsResponse WorkspaceBindingsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &workspaceBindingsResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &workspaceBindingsResponse) return &workspaceBindingsResponse, err } diff --git a/service/cleanrooms/impl.go 
b/service/cleanrooms/impl.go index 50937ebf0..5e47e439a 100755 --- a/service/cleanrooms/impl.go +++ b/service/cleanrooms/impl.go @@ -18,47 +18,52 @@ type cleanRoomAssetsImpl struct { func (a *cleanRoomAssetsImpl) Create(ctx context.Context, request CreateCleanRoomAssetRequest) (*CleanRoomAsset, error) { var cleanRoomAsset CleanRoomAsset path := fmt.Sprintf("/api/2.0/clean-rooms/%v/assets", request.CleanRoomName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.Asset, &cleanRoomAsset) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Asset, &cleanRoomAsset) return &cleanRoomAsset, err } func (a *cleanRoomAssetsImpl) Delete(ctx context.Context, request DeleteCleanRoomAssetRequest) error { var deleteCleanRoomAssetResponse DeleteCleanRoomAssetResponse path := fmt.Sprintf("/api/2.0/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.AssetFullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteCleanRoomAssetResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCleanRoomAssetResponse) return err } func (a *cleanRoomAssetsImpl) Get(ctx context.Context, request GetCleanRoomAssetRequest) (*CleanRoomAsset, error) { var cleanRoomAsset CleanRoomAsset path := fmt.Sprintf("/api/2.0/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.AssetFullName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &cleanRoomAsset) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &cleanRoomAsset) return 
&cleanRoomAsset, err } func (a *cleanRoomAssetsImpl) List(ctx context.Context, request ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { var listCleanRoomAssetsResponse ListCleanRoomAssetsResponse path := fmt.Sprintf("/api/2.0/clean-rooms/%v/assets", request.CleanRoomName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listCleanRoomAssetsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCleanRoomAssetsResponse) return &listCleanRoomAssetsResponse, err } func (a *cleanRoomAssetsImpl) Update(ctx context.Context, request UpdateCleanRoomAssetRequest) (*CleanRoomAsset, error) { var cleanRoomAsset CleanRoomAsset path := fmt.Sprintf("/api/2.0/clean-rooms/%v/assets/%v/%v", request.CleanRoomName, request.AssetType, request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request.Asset, &cleanRoomAsset) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Asset, &cleanRoomAsset) return &cleanRoomAsset, err } @@ -70,9 +75,10 @@ type cleanRoomTaskRunsImpl struct { func (a *cleanRoomTaskRunsImpl) List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { var listCleanRoomNotebookTaskRunsResponse ListCleanRoomNotebookTaskRunsResponse path := fmt.Sprintf("/api/2.0/clean-rooms/%v/runs", request.CleanRoomName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listCleanRoomNotebookTaskRunsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, 
&listCleanRoomNotebookTaskRunsResponse) return &listCleanRoomNotebookTaskRunsResponse, err } @@ -84,56 +90,62 @@ type cleanRoomsImpl struct { func (a *cleanRoomsImpl) Create(ctx context.Context, request CreateCleanRoomRequest) (*CleanRoom, error) { var cleanRoom CleanRoom path := "/api/2.0/clean-rooms" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.CleanRoom, &cleanRoom) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.CleanRoom, &cleanRoom) return &cleanRoom, err } func (a *cleanRoomsImpl) CreateOutputCatalog(ctx context.Context, request CreateCleanRoomOutputCatalogRequest) (*CreateCleanRoomOutputCatalogResponse, error) { var createCleanRoomOutputCatalogResponse CreateCleanRoomOutputCatalogResponse path := fmt.Sprintf("/api/2.0/clean-rooms/%v/output-catalogs", request.CleanRoomName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.OutputCatalog, &createCleanRoomOutputCatalogResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.OutputCatalog, &createCleanRoomOutputCatalogResponse) return &createCleanRoomOutputCatalogResponse, err } func (a *cleanRoomsImpl) Delete(ctx context.Context, request DeleteCleanRoomRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/clean-rooms/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *cleanRoomsImpl) Get(ctx 
context.Context, request GetCleanRoomRequest) (*CleanRoom, error) { var cleanRoom CleanRoom path := fmt.Sprintf("/api/2.0/clean-rooms/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &cleanRoom) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &cleanRoom) return &cleanRoom, err } func (a *cleanRoomsImpl) List(ctx context.Context, request ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { var listCleanRoomsResponse ListCleanRoomsResponse path := "/api/2.0/clean-rooms" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listCleanRoomsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listCleanRoomsResponse) return &listCleanRoomsResponse, err } func (a *cleanRoomsImpl) Update(ctx context.Context, request UpdateCleanRoomRequest) (*CleanRoom, error) { var cleanRoom CleanRoom path := fmt.Sprintf("/api/2.0/clean-rooms/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &cleanRoom) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &cleanRoom) return &cleanRoom, err } diff --git a/service/compute/impl.go b/service/compute/impl.go index abf2c76ff..4b4d7f6bb 100755 --- a/service/compute/impl.go +++ b/service/compute/impl.go @@ -18,86 +18,95 @@ type clusterPoliciesImpl struct { func (a *clusterPoliciesImpl) Create(ctx context.Context, request CreatePolicy) (*CreatePolicyResponse, error) { var createPolicyResponse CreatePolicyResponse path := "/api/2.0/policies/clusters/create" + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createPolicyResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPolicyResponse) return &createPolicyResponse, err } func (a *clusterPoliciesImpl) Delete(ctx context.Context, request DeletePolicy) error { var deletePolicyResponse DeletePolicyResponse path := "/api/2.0/policies/clusters/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deletePolicyResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deletePolicyResponse) return err } func (a *clusterPoliciesImpl) Edit(ctx context.Context, request EditPolicy) error { var editPolicyResponse EditPolicyResponse path := "/api/2.0/policies/clusters/edit" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &editPolicyResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editPolicyResponse) return err } func (a *clusterPoliciesImpl) Get(ctx context.Context, request GetClusterPolicyRequest) (*Policy, error) { var policy Policy path := "/api/2.0/policies/clusters/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &policy) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &policy) return &policy, err } func (a *clusterPoliciesImpl) GetPermissionLevels(ctx context.Context, request 
GetClusterPolicyPermissionLevelsRequest) (*GetClusterPolicyPermissionLevelsResponse, error) { var getClusterPolicyPermissionLevelsResponse GetClusterPolicyPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/cluster-policies/%v/permissionLevels", request.ClusterPolicyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getClusterPolicyPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getClusterPolicyPermissionLevelsResponse) return &getClusterPolicyPermissionLevelsResponse, err } func (a *clusterPoliciesImpl) GetPermissions(ctx context.Context, request GetClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { var clusterPolicyPermissions ClusterPolicyPermissions path := fmt.Sprintf("/api/2.0/permissions/cluster-policies/%v", request.ClusterPolicyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &clusterPolicyPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterPolicyPermissions) return &clusterPolicyPermissions, err } func (a *clusterPoliciesImpl) List(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { var listPoliciesResponse ListPoliciesResponse path := "/api/2.0/policies/clusters/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listPoliciesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listPoliciesResponse) return &listPoliciesResponse, err } func (a *clusterPoliciesImpl) SetPermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) 
(*ClusterPolicyPermissions, error) { var clusterPolicyPermissions ClusterPolicyPermissions path := fmt.Sprintf("/api/2.0/permissions/cluster-policies/%v", request.ClusterPolicyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &clusterPolicyPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &clusterPolicyPermissions) return &clusterPolicyPermissions, err } func (a *clusterPoliciesImpl) UpdatePermissions(ctx context.Context, request ClusterPolicyPermissionsRequest) (*ClusterPolicyPermissions, error) { var clusterPolicyPermissions ClusterPolicyPermissions path := fmt.Sprintf("/api/2.0/permissions/cluster-policies/%v", request.ClusterPolicyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &clusterPolicyPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &clusterPolicyPermissions) return &clusterPolicyPermissions, err } @@ -109,203 +118,224 @@ type clustersImpl struct { func (a *clustersImpl) ChangeOwner(ctx context.Context, request ChangeClusterOwner) error { var changeClusterOwnerResponse ChangeClusterOwnerResponse path := "/api/2.1/clusters/change-owner" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &changeClusterOwnerResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &changeClusterOwnerResponse) return err } func (a *clustersImpl) Create(ctx context.Context, request CreateCluster) (*CreateClusterResponse, error) { var 
createClusterResponse CreateClusterResponse path := "/api/2.1/clusters/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createClusterResponse) return &createClusterResponse, err } func (a *clustersImpl) Delete(ctx context.Context, request DeleteCluster) error { var deleteClusterResponse DeleteClusterResponse path := "/api/2.1/clusters/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteClusterResponse) return err } func (a *clustersImpl) Edit(ctx context.Context, request EditCluster) error { var editClusterResponse EditClusterResponse path := "/api/2.1/clusters/edit" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &editClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editClusterResponse) return err } func (a *clustersImpl) Events(ctx context.Context, request GetEvents) (*GetEventsResponse, error) { var getEventsResponse GetEventsResponse path := "/api/2.1/clusters/events" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &getEventsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, 
queryParams, request, &getEventsResponse) return &getEventsResponse, err } func (a *clustersImpl) Get(ctx context.Context, request GetClusterRequest) (*ClusterDetails, error) { var clusterDetails ClusterDetails path := "/api/2.1/clusters/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &clusterDetails) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterDetails) return &clusterDetails, err } func (a *clustersImpl) GetPermissionLevels(ctx context.Context, request GetClusterPermissionLevelsRequest) (*GetClusterPermissionLevelsResponse, error) { var getClusterPermissionLevelsResponse GetClusterPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/clusters/%v/permissionLevels", request.ClusterId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getClusterPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getClusterPermissionLevelsResponse) return &getClusterPermissionLevelsResponse, err } func (a *clustersImpl) GetPermissions(ctx context.Context, request GetClusterPermissionsRequest) (*ClusterPermissions, error) { var clusterPermissions ClusterPermissions path := fmt.Sprintf("/api/2.0/permissions/clusters/%v", request.ClusterId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &clusterPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterPermissions) return &clusterPermissions, err } func (a *clustersImpl) List(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error) { var listClustersResponse ListClustersResponse 
path := "/api/2.1/clusters/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listClustersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listClustersResponse) return &listClustersResponse, err } func (a *clustersImpl) ListNodeTypes(ctx context.Context) (*ListNodeTypesResponse, error) { var listNodeTypesResponse ListNodeTypesResponse path := "/api/2.1/clusters/list-node-types" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listNodeTypesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listNodeTypesResponse) return &listNodeTypesResponse, err } func (a *clustersImpl) ListZones(ctx context.Context) (*ListAvailableZonesResponse, error) { var listAvailableZonesResponse ListAvailableZonesResponse path := "/api/2.1/clusters/list-zones" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listAvailableZonesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listAvailableZonesResponse) return &listAvailableZonesResponse, err } func (a *clustersImpl) PermanentDelete(ctx context.Context, request PermanentDeleteCluster) error { var permanentDeleteClusterResponse PermanentDeleteClusterResponse path := "/api/2.1/clusters/permanent-delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &permanentDeleteClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &permanentDeleteClusterResponse) return err } func (a *clustersImpl) Pin(ctx context.Context, request 
PinCluster) error { var pinClusterResponse PinClusterResponse path := "/api/2.1/clusters/pin" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &pinClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &pinClusterResponse) return err } func (a *clustersImpl) Resize(ctx context.Context, request ResizeCluster) error { var resizeClusterResponse ResizeClusterResponse path := "/api/2.1/clusters/resize" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &resizeClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &resizeClusterResponse) return err } func (a *clustersImpl) Restart(ctx context.Context, request RestartCluster) error { var restartClusterResponse RestartClusterResponse path := "/api/2.1/clusters/restart" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &restartClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &restartClusterResponse) return err } func (a *clustersImpl) SetPermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { var clusterPermissions ClusterPermissions path := fmt.Sprintf("/api/2.0/permissions/clusters/%v", request.ClusterId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, 
&clusterPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &clusterPermissions) return &clusterPermissions, err } func (a *clustersImpl) SparkVersions(ctx context.Context) (*GetSparkVersionsResponse, error) { var getSparkVersionsResponse GetSparkVersionsResponse path := "/api/2.1/clusters/spark-versions" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &getSparkVersionsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getSparkVersionsResponse) return &getSparkVersionsResponse, err } func (a *clustersImpl) Start(ctx context.Context, request StartCluster) error { var startClusterResponse StartClusterResponse path := "/api/2.1/clusters/start" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &startClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &startClusterResponse) return err } func (a *clustersImpl) Unpin(ctx context.Context, request UnpinCluster) error { var unpinClusterResponse UnpinClusterResponse path := "/api/2.1/clusters/unpin" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &unpinClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &unpinClusterResponse) return err } func (a *clustersImpl) Update(ctx context.Context, request UpdateCluster) error { var updateClusterResponse UpdateClusterResponse path := "/api/2.1/clusters/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = 
"application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &updateClusterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateClusterResponse) return err } func (a *clustersImpl) UpdatePermissions(ctx context.Context, request ClusterPermissionsRequest) (*ClusterPermissions, error) { var clusterPermissions ClusterPermissions path := fmt.Sprintf("/api/2.0/permissions/clusters/%v", request.ClusterId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &clusterPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &clusterPermissions) return &clusterPermissions, err } @@ -317,58 +347,64 @@ type commandExecutionImpl struct { func (a *commandExecutionImpl) Cancel(ctx context.Context, request CancelCommand) error { var cancelResponse CancelResponse path := "/api/1.2/commands/cancel" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &cancelResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelResponse) return err } func (a *commandExecutionImpl) CommandStatus(ctx context.Context, request CommandStatusRequest) (*CommandStatusResponse, error) { var commandStatusResponse CommandStatusResponse path := "/api/1.2/commands/status" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &commandStatusResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &commandStatusResponse) return &commandStatusResponse, err } func (a 
*commandExecutionImpl) ContextStatus(ctx context.Context, request ContextStatusRequest) (*ContextStatusResponse, error) { var contextStatusResponse ContextStatusResponse path := "/api/1.2/contexts/status" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &contextStatusResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &contextStatusResponse) return &contextStatusResponse, err } func (a *commandExecutionImpl) Create(ctx context.Context, request CreateContext) (*Created, error) { var created Created path := "/api/1.2/contexts/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &created) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &created) return &created, err } func (a *commandExecutionImpl) Destroy(ctx context.Context, request DestroyContext) error { var destroyResponse DestroyResponse path := "/api/1.2/contexts/destroy" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &destroyResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &destroyResponse) return err } func (a *commandExecutionImpl) Execute(ctx context.Context, request Command) (*Created, error) { var created Created path := "/api/1.2/commands/execute" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &created) + err := a.client.Do(ctx, http.MethodPost, 
path, headers, queryParams, request, &created) return &created, err } @@ -380,45 +416,50 @@ type globalInitScriptsImpl struct { func (a *globalInitScriptsImpl) Create(ctx context.Context, request GlobalInitScriptCreateRequest) (*CreateResponse, error) { var createResponse CreateResponse path := "/api/2.0/global-init-scripts" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) return &createResponse, err } func (a *globalInitScriptsImpl) Delete(ctx context.Context, request DeleteGlobalInitScriptRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/global-init-scripts/%v", request.ScriptId) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *globalInitScriptsImpl) Get(ctx context.Context, request GetGlobalInitScriptRequest) (*GlobalInitScriptDetailsWithContent, error) { var globalInitScriptDetailsWithContent GlobalInitScriptDetailsWithContent path := fmt.Sprintf("/api/2.0/global-init-scripts/%v", request.ScriptId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &globalInitScriptDetailsWithContent) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &globalInitScriptDetailsWithContent) return &globalInitScriptDetailsWithContent, err } func (a *globalInitScriptsImpl) List(ctx context.Context) (*ListGlobalInitScriptsResponse, error) { var listGlobalInitScriptsResponse 
ListGlobalInitScriptsResponse path := "/api/2.0/global-init-scripts" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listGlobalInitScriptsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listGlobalInitScriptsResponse) return &listGlobalInitScriptsResponse, err } func (a *globalInitScriptsImpl) Update(ctx context.Context, request GlobalInitScriptUpdateRequest) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/global-init-scripts/%v", request.ScriptId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err } @@ -430,86 +471,95 @@ type instancePoolsImpl struct { func (a *instancePoolsImpl) Create(ctx context.Context, request CreateInstancePool) (*CreateInstancePoolResponse, error) { var createInstancePoolResponse CreateInstancePoolResponse path := "/api/2.0/instance-pools/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createInstancePoolResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createInstancePoolResponse) return &createInstancePoolResponse, err } func (a *instancePoolsImpl) Delete(ctx context.Context, request DeleteInstancePool) error { var deleteInstancePoolResponse DeleteInstancePoolResponse path := "/api/2.0/instance-pools/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, 
request, &deleteInstancePoolResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteInstancePoolResponse) return err } func (a *instancePoolsImpl) Edit(ctx context.Context, request EditInstancePool) error { var editInstancePoolResponse EditInstancePoolResponse path := "/api/2.0/instance-pools/edit" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &editInstancePoolResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editInstancePoolResponse) return err } func (a *instancePoolsImpl) Get(ctx context.Context, request GetInstancePoolRequest) (*GetInstancePool, error) { var getInstancePool GetInstancePool path := "/api/2.0/instance-pools/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getInstancePool) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getInstancePool) return &getInstancePool, err } func (a *instancePoolsImpl) GetPermissionLevels(ctx context.Context, request GetInstancePoolPermissionLevelsRequest) (*GetInstancePoolPermissionLevelsResponse, error) { var getInstancePoolPermissionLevelsResponse GetInstancePoolPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/instance-pools/%v/permissionLevels", request.InstancePoolId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getInstancePoolPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getInstancePoolPermissionLevelsResponse) return &getInstancePoolPermissionLevelsResponse, err } func (a 
*instancePoolsImpl) GetPermissions(ctx context.Context, request GetInstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { var instancePoolPermissions InstancePoolPermissions path := fmt.Sprintf("/api/2.0/permissions/instance-pools/%v", request.InstancePoolId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &instancePoolPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &instancePoolPermissions) return &instancePoolPermissions, err } func (a *instancePoolsImpl) List(ctx context.Context) (*ListInstancePools, error) { var listInstancePools ListInstancePools path := "/api/2.0/instance-pools/list" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listInstancePools) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listInstancePools) return &listInstancePools, err } func (a *instancePoolsImpl) SetPermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { var instancePoolPermissions InstancePoolPermissions path := fmt.Sprintf("/api/2.0/permissions/instance-pools/%v", request.InstancePoolId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &instancePoolPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &instancePoolPermissions) return &instancePoolPermissions, err } func (a *instancePoolsImpl) UpdatePermissions(ctx context.Context, request InstancePoolPermissionsRequest) (*InstancePoolPermissions, error) { var instancePoolPermissions InstancePoolPermissions path := fmt.Sprintf("/api/2.0/permissions/instance-pools/%v", 
request.InstancePoolId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &instancePoolPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &instancePoolPermissions) return &instancePoolPermissions, err } @@ -521,39 +571,43 @@ type instanceProfilesImpl struct { func (a *instanceProfilesImpl) Add(ctx context.Context, request AddInstanceProfile) error { var addResponse AddResponse path := "/api/2.0/instance-profiles/add" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &addResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &addResponse) return err } func (a *instanceProfilesImpl) Edit(ctx context.Context, request InstanceProfile) error { var editResponse EditResponse path := "/api/2.0/instance-profiles/edit" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &editResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editResponse) return err } func (a *instanceProfilesImpl) List(ctx context.Context) (*ListInstanceProfilesResponse, error) { var listInstanceProfilesResponse ListInstanceProfilesResponse path := "/api/2.0/instance-profiles/list" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listInstanceProfilesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listInstanceProfilesResponse) return &listInstanceProfilesResponse, 
err } func (a *instanceProfilesImpl) Remove(ctx context.Context, request RemoveInstanceProfile) error { var removeResponse RemoveResponse path := "/api/2.0/instance-profiles/remove" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &removeResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &removeResponse) return err } @@ -565,38 +619,42 @@ type librariesImpl struct { func (a *librariesImpl) AllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) { var listAllClusterLibraryStatusesResponse ListAllClusterLibraryStatusesResponse path := "/api/2.0/libraries/all-cluster-statuses" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listAllClusterLibraryStatusesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listAllClusterLibraryStatusesResponse) return &listAllClusterLibraryStatusesResponse, err } func (a *librariesImpl) ClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterLibraryStatuses, error) { var clusterLibraryStatuses ClusterLibraryStatuses path := "/api/2.0/libraries/cluster-status" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &clusterLibraryStatuses) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &clusterLibraryStatuses) return &clusterLibraryStatuses, err } func (a *librariesImpl) Install(ctx context.Context, request InstallLibraries) error { var installLibrariesResponse InstallLibrariesResponse path := "/api/2.0/libraries/install" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &installLibrariesResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &installLibrariesResponse) return err } func (a *librariesImpl) Uninstall(ctx context.Context, request UninstallLibraries) error { var uninstallLibrariesResponse UninstallLibrariesResponse path := "/api/2.0/libraries/uninstall" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &uninstallLibrariesResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &uninstallLibrariesResponse) return err } @@ -608,28 +666,31 @@ type policyComplianceForClustersImpl struct { func (a *policyComplianceForClustersImpl) EnforceCompliance(ctx context.Context, request EnforceClusterComplianceRequest) (*EnforceClusterComplianceResponse, error) { var enforceClusterComplianceResponse EnforceClusterComplianceResponse path := "/api/2.0/policies/clusters/enforce-compliance" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &enforceClusterComplianceResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &enforceClusterComplianceResponse) return &enforceClusterComplianceResponse, err } func (a *policyComplianceForClustersImpl) GetCompliance(ctx context.Context, request GetClusterComplianceRequest) (*GetClusterComplianceResponse, error) { var getClusterComplianceResponse GetClusterComplianceResponse path := "/api/2.0/policies/clusters/get-compliance" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getClusterComplianceResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getClusterComplianceResponse) return &getClusterComplianceResponse, err } func (a *policyComplianceForClustersImpl) ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { var listClusterCompliancesResponse ListClusterCompliancesResponse path := "/api/2.0/policies/clusters/list-compliance" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listClusterCompliancesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listClusterCompliancesResponse) return &listClusterCompliancesResponse, err } @@ -641,17 +702,19 @@ type policyFamiliesImpl struct { func (a *policyFamiliesImpl) Get(ctx context.Context, request GetPolicyFamilyRequest) (*PolicyFamily, error) { var policyFamily PolicyFamily path := fmt.Sprintf("/api/2.0/policy-families/%v", request.PolicyFamilyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &policyFamily) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &policyFamily) return &policyFamily, err } func (a *policyFamiliesImpl) List(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { var listPolicyFamiliesResponse ListPolicyFamiliesResponse path := "/api/2.0/policy-families" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listPolicyFamiliesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, 
queryParams, request, &listPolicyFamiliesResponse) return &listPolicyFamiliesResponse, err } diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index babb7fc7c..c23deddd6 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -18,47 +18,52 @@ type genieImpl struct { func (a *genieImpl) CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) { var genieMessage GenieMessage path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages", request.SpaceId, request.ConversationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &genieMessage) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &genieMessage) return &genieMessage, err } func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/execute-query", request.SpaceId, request.ConversationId, request.MessageId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &genieGetMessageQueryResultResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &genieGetMessageQueryResultResponse) return &genieGetMessageQueryResultResponse, err } func (a *genieImpl) GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) { var genieMessage GenieMessage path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v", request.SpaceId, request.ConversationId, request.MessageId) + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &genieMessage) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieMessage) return &genieMessage, err } func (a *genieImpl) GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/query-result", request.SpaceId, request.ConversationId, request.MessageId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &genieGetMessageQueryResultResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieGetMessageQueryResultResponse) return &genieGetMessageQueryResultResponse, err } func (a *genieImpl) StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) { var genieStartConversationResponse GenieStartConversationResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/start-conversation", request.SpaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &genieStartConversationResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &genieStartConversationResponse) return &genieStartConversationResponse, err } @@ -70,168 +75,186 @@ type lakeviewImpl struct { func (a *lakeviewImpl) Create(ctx context.Context, request CreateDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := "/api/2.0/lakeview/dashboards" + 
queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.Dashboard, &dashboard) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Dashboard, &dashboard) return &dashboard, err } func (a *lakeviewImpl) CreateSchedule(ctx context.Context, request CreateScheduleRequest) (*Schedule, error) { var schedule Schedule path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.Schedule, &schedule) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Schedule, &schedule) return &schedule, err } func (a *lakeviewImpl) CreateSubscription(ctx context.Context, request CreateSubscriptionRequest) (*Subscription, error) { var subscription Subscription path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v/subscriptions", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.Subscription, &subscription) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Subscription, &subscription) return &subscription, err } func (a *lakeviewImpl) DeleteSchedule(ctx context.Context, request DeleteScheduleRequest) error { var deleteScheduleResponse DeleteScheduleResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := 
a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteScheduleResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteScheduleResponse) return err } func (a *lakeviewImpl) DeleteSubscription(ctx context.Context, request DeleteSubscriptionRequest) error { var deleteSubscriptionResponse DeleteSubscriptionResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v/subscriptions/%v", request.DashboardId, request.ScheduleId, request.SubscriptionId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteSubscriptionResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteSubscriptionResponse) return err } func (a *lakeviewImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &dashboard) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &dashboard) return &dashboard, err } func (a *lakeviewImpl) GetPublished(ctx context.Context, request GetPublishedDashboardRequest) (*PublishedDashboard, error) { var publishedDashboard PublishedDashboard path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/published", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &publishedDashboard) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &publishedDashboard) return &publishedDashboard, err } func (a *lakeviewImpl) GetSchedule(ctx 
context.Context, request GetScheduleRequest) (*Schedule, error) { var schedule Schedule path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &schedule) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &schedule) return &schedule, err } func (a *lakeviewImpl) GetSubscription(ctx context.Context, request GetSubscriptionRequest) (*Subscription, error) { var subscription Subscription path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v/subscriptions/%v", request.DashboardId, request.ScheduleId, request.SubscriptionId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &subscription) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &subscription) return &subscription, err } func (a *lakeviewImpl) List(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) { var listDashboardsResponse ListDashboardsResponse path := "/api/2.0/lakeview/dashboards" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listDashboardsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listDashboardsResponse) return &listDashboardsResponse, err } func (a *lakeviewImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) (*ListSchedulesResponse, error) { var listSchedulesResponse ListSchedulesResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) 
headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listSchedulesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSchedulesResponse) return &listSchedulesResponse, err } func (a *lakeviewImpl) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { var listSubscriptionsResponse ListSubscriptionsResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v/subscriptions", request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listSubscriptionsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSubscriptionsResponse) return &listSubscriptionsResponse, err } func (a *lakeviewImpl) Migrate(ctx context.Context, request MigrateDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := "/api/2.0/lakeview/dashboards/migrate" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &dashboard) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &dashboard) return &dashboard, err } func (a *lakeviewImpl) Publish(ctx context.Context, request PublishRequest) (*PublishedDashboard, error) { var publishedDashboard PublishedDashboard path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/published", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &publishedDashboard) + err := a.client.Do(ctx, http.MethodPost, path, 
headers, queryParams, request, &publishedDashboard) return &publishedDashboard, err } func (a *lakeviewImpl) Trash(ctx context.Context, request TrashDashboardRequest) error { var trashDashboardResponse TrashDashboardResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &trashDashboardResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &trashDashboardResponse) return err } func (a *lakeviewImpl) Unpublish(ctx context.Context, request UnpublishDashboardRequest) error { var unpublishDashboardResponse UnpublishDashboardResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/published", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &unpublishDashboardResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &unpublishDashboardResponse) return err } func (a *lakeviewImpl) Update(ctx context.Context, request UpdateDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request.Dashboard, &dashboard) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Dashboard, &dashboard) return &dashboard, err } func (a *lakeviewImpl) UpdateSchedule(ctx context.Context, request UpdateScheduleRequest) (*Schedule, error) { var schedule Schedule path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v", 
request.DashboardId, request.ScheduleId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request.Schedule, &schedule) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request.Schedule, &schedule) return &schedule, err } diff --git a/service/files/impl.go b/service/files/impl.go index 23b25c450..6e83c42cd 100755 --- a/service/files/impl.go +++ b/service/files/impl.go @@ -19,97 +19,107 @@ type dbfsImpl struct { func (a *dbfsImpl) AddBlock(ctx context.Context, request AddBlock) error { var addBlockResponse AddBlockResponse path := "/api/2.0/dbfs/add-block" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &addBlockResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &addBlockResponse) return err } func (a *dbfsImpl) Close(ctx context.Context, request Close) error { var closeResponse CloseResponse path := "/api/2.0/dbfs/close" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &closeResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &closeResponse) return err } func (a *dbfsImpl) Create(ctx context.Context, request Create) (*CreateResponse, error) { var createResponse CreateResponse path := "/api/2.0/dbfs/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createResponse) + err := 
a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) return &createResponse, err } func (a *dbfsImpl) Delete(ctx context.Context, request Delete) error { var deleteResponse DeleteResponse path := "/api/2.0/dbfs/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteResponse) return err } func (a *dbfsImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*FileInfo, error) { var fileInfo FileInfo path := "/api/2.0/dbfs/get-status" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &fileInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &fileInfo) return &fileInfo, err } func (a *dbfsImpl) List(ctx context.Context, request ListDbfsRequest) (*ListStatusResponse, error) { var listStatusResponse ListStatusResponse path := "/api/2.0/dbfs/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listStatusResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listStatusResponse) return &listStatusResponse, err } func (a *dbfsImpl) Mkdirs(ctx context.Context, request MkDirs) error { var mkDirsResponse MkDirsResponse path := "/api/2.0/dbfs/mkdirs" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &mkDirsResponse) + err := a.client.Do(ctx, http.MethodPost, path, 
headers, queryParams, request, &mkDirsResponse) return err } func (a *dbfsImpl) Move(ctx context.Context, request Move) error { var moveResponse MoveResponse path := "/api/2.0/dbfs/move" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &moveResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &moveResponse) return err } func (a *dbfsImpl) Put(ctx context.Context, request Put) error { var putResponse PutResponse path := "/api/2.0/dbfs/put" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &putResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &putResponse) return err } func (a *dbfsImpl) Read(ctx context.Context, request ReadDbfsRequest) (*ReadResponse, error) { var readResponse ReadResponse path := "/api/2.0/dbfs/read" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &readResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &readResponse) return &readResponse, err } @@ -121,66 +131,75 @@ type filesImpl struct { func (a *filesImpl) CreateDirectory(ctx context.Context, request CreateDirectoryRequest) error { var createDirectoryResponse CreateDirectoryResponse path := fmt.Sprintf("/api/2.0/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodPut, path, headers, nil, &createDirectoryResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, 
queryParams, nil, &createDirectoryResponse) return err } func (a *filesImpl) Delete(ctx context.Context, request DeleteFileRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *filesImpl) DeleteDirectory(ctx context.Context, request DeleteDirectoryRequest) error { var deleteDirectoryResponse DeleteDirectoryResponse path := fmt.Sprintf("/api/2.0/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteDirectoryResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDirectoryResponse) return err } func (a *filesImpl) Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) { var downloadResponse DownloadResponse path := fmt.Sprintf("/api/2.0/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/octet-stream" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &downloadResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &downloadResponse) return &downloadResponse, err } func (a *filesImpl) GetDirectoryMetadata(ctx context.Context, request GetDirectoryMetadataRequest) error { var getDirectoryMetadataResponse GetDirectoryMetadataResponse path := fmt.Sprintf("/api/2.0/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) + queryParams := 
make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodHead, path, headers, request, &getDirectoryMetadataResponse) + err := a.client.Do(ctx, http.MethodHead, path, headers, queryParams, request, &getDirectoryMetadataResponse) return err } func (a *filesImpl) GetMetadata(ctx context.Context, request GetMetadataRequest) (*GetMetadataResponse, error) { var getMetadataResponse GetMetadataResponse path := fmt.Sprintf("/api/2.0/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodHead, path, headers, request, &getMetadataResponse) + err := a.client.Do(ctx, http.MethodHead, path, headers, queryParams, request, &getMetadataResponse) return &getMetadataResponse, err } func (a *filesImpl) ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { var listDirectoryResponse ListDirectoryResponse path := fmt.Sprintf("/api/2.0/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listDirectoryResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listDirectoryResponse) return &listDirectoryResponse, err } func (a *filesImpl) Upload(ctx context.Context, request UploadRequest) error { var uploadResponse UploadResponse path := fmt.Sprintf("/api/2.0/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) + queryParams := make(map[string]any) + queryParams["overwrite"] = request.Overwrite headers := make(map[string]string) headers["Content-Type"] = "application/octet-stream" - err := a.client.Do(ctx, http.MethodPut, path, headers, request.Contents, &uploadResponse) + err := a.client.Do(ctx, 
http.MethodPut, path, headers, queryParams, request.Contents, &uploadResponse) return err } diff --git a/service/iam/impl.go b/service/iam/impl.go index deb73a70c..f41d601c3 100755 --- a/service/iam/impl.go +++ b/service/iam/impl.go @@ -18,28 +18,31 @@ type accountAccessControlImpl struct { func (a *accountAccessControlImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { var getAssignableRolesForResourceResponse GetAssignableRolesForResourceResponse path := fmt.Sprintf("/api/2.0/preview/accounts/%v/access-control/assignable-roles", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getAssignableRolesForResourceResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getAssignableRolesForResourceResponse) return &getAssignableRolesForResourceResponse, err } func (a *accountAccessControlImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := fmt.Sprintf("/api/2.0/preview/accounts/%v/access-control/rule-sets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &ruleSetResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &ruleSetResponse) return &ruleSetResponse, err } func (a *accountAccessControlImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := fmt.Sprintf("/api/2.0/preview/accounts/%v/access-control/rule-sets", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) 
headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &ruleSetResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &ruleSetResponse) return &ruleSetResponse, err } @@ -51,28 +54,31 @@ type accountAccessControlProxyImpl struct { func (a *accountAccessControlProxyImpl) GetAssignableRolesForResource(ctx context.Context, request GetAssignableRolesForResourceRequest) (*GetAssignableRolesForResourceResponse, error) { var getAssignableRolesForResourceResponse GetAssignableRolesForResourceResponse path := "/api/2.0/preview/accounts/access-control/assignable-roles" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getAssignableRolesForResourceResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getAssignableRolesForResourceResponse) return &getAssignableRolesForResourceResponse, err } func (a *accountAccessControlProxyImpl) GetRuleSet(ctx context.Context, request GetRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := "/api/2.0/preview/accounts/access-control/rule-sets" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &ruleSetResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &ruleSetResponse) return &ruleSetResponse, err } func (a *accountAccessControlProxyImpl) UpdateRuleSet(ctx context.Context, request UpdateRuleSetRequest) (*RuleSetResponse, error) { var ruleSetResponse RuleSetResponse path := "/api/2.0/preview/accounts/access-control/rule-sets" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] 
= "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &ruleSetResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &ruleSetResponse) return &ruleSetResponse, err } @@ -84,56 +90,62 @@ type accountGroupsImpl struct { func (a *accountGroupsImpl) Create(ctx context.Context, request Group) (*Group, error) { var group Group path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &group) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &group) return &group, err } func (a *accountGroupsImpl) Delete(ctx context.Context, request DeleteAccountGroupRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountGroupsImpl) Get(ctx context.Context, request GetAccountGroupRequest) (*Group, error) { var group Group path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &group) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &group) return &group, err } func (a *accountGroupsImpl) List(ctx context.Context, request ListAccountGroupsRequest) (*ListGroupsResponse, error) { var listGroupsResponse 
ListGroupsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listGroupsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listGroupsResponse) return &listGroupsResponse, err } func (a *accountGroupsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err } func (a *accountGroupsImpl) Update(ctx context.Context, request Group) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } @@ -145,56 +157,62 @@ type accountServicePrincipalsImpl struct { func (a *accountServicePrincipalsImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) 
headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &servicePrincipal) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &servicePrincipal) return &servicePrincipal, err } func (a *accountServicePrincipalsImpl) Delete(ctx context.Context, request DeleteAccountServicePrincipalRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountServicePrincipalsImpl) Get(ctx context.Context, request GetAccountServicePrincipalRequest) (*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &servicePrincipal) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servicePrincipal) return &servicePrincipal, err } func (a *accountServicePrincipalsImpl) List(ctx context.Context, request ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { var listServicePrincipalResponse ListServicePrincipalResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listServicePrincipalResponse) + err := 
a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listServicePrincipalResponse) return &listServicePrincipalResponse, err } func (a *accountServicePrincipalsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err } func (a *accountServicePrincipalsImpl) Update(ctx context.Context, request ServicePrincipal) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } @@ -206,56 +224,62 @@ type accountUsersImpl struct { func (a *accountUsersImpl) Create(ctx context.Context, request User) (*User, error) { var user User path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &user) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &user) return &user, err } func (a *accountUsersImpl) Delete(ctx context.Context, request 
DeleteAccountUserRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountUsersImpl) Get(ctx context.Context, request GetAccountUserRequest) (*User, error) { var user User path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &user) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &user) return &user, err } func (a *accountUsersImpl) List(ctx context.Context, request ListAccountUsersRequest) (*ListUsersResponse, error) { var listUsersResponse ListUsersResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listUsersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listUsersResponse) return &listUsersResponse, err } func (a *accountUsersImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchResponse) + err 
:= a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err } func (a *accountUsersImpl) Update(ctx context.Context, request User) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } @@ -267,9 +291,10 @@ type currentUserImpl struct { func (a *currentUserImpl) Me(ctx context.Context) (*User, error) { var user User path := "/api/2.0/preview/scim/v2/Me" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &user) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &user) return &user, err } @@ -281,56 +306,62 @@ type groupsImpl struct { func (a *groupsImpl) Create(ctx context.Context, request Group) (*Group, error) { var group Group path := "/api/2.0/preview/scim/v2/Groups" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &group) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &group) return &group, err } func (a *groupsImpl) Delete(ctx context.Context, request DeleteGroupRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, 
path, headers, queryParams, request, &deleteResponse) return err } func (a *groupsImpl) Get(ctx context.Context, request GetGroupRequest) (*Group, error) { var group Group path := fmt.Sprintf("/api/2.0/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &group) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &group) return &group, err } func (a *groupsImpl) List(ctx context.Context, request ListGroupsRequest) (*ListGroupsResponse, error) { var listGroupsResponse ListGroupsResponse path := "/api/2.0/preview/scim/v2/Groups" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listGroupsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listGroupsResponse) return &listGroupsResponse, err } func (a *groupsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err } func (a *groupsImpl) Update(ctx context.Context, request Group) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/Groups/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, 
&updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } @@ -342,10 +373,11 @@ type permissionMigrationImpl struct { func (a *permissionMigrationImpl) MigratePermissions(ctx context.Context, request MigratePermissionsRequest) (*MigratePermissionsResponse, error) { var migratePermissionsResponse MigratePermissionsResponse path := "/api/2.0/permissionmigration" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &migratePermissionsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &migratePermissionsResponse) return &migratePermissionsResponse, err } @@ -357,38 +389,42 @@ type permissionsImpl struct { func (a *permissionsImpl) Get(ctx context.Context, request GetPermissionRequest) (*ObjectPermissions, error) { var objectPermissions ObjectPermissions path := fmt.Sprintf("/api/2.0/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &objectPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &objectPermissions) return &objectPermissions, err } func (a *permissionsImpl) GetPermissionLevels(ctx context.Context, request GetPermissionLevelsRequest) (*GetPermissionLevelsResponse, error) { var getPermissionLevelsResponse GetPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/%v/%v/permissionLevels", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPermissionLevelsResponse) 
+ err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPermissionLevelsResponse) return &getPermissionLevelsResponse, err } func (a *permissionsImpl) Set(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { var objectPermissions ObjectPermissions path := fmt.Sprintf("/api/2.0/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &objectPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &objectPermissions) return &objectPermissions, err } func (a *permissionsImpl) Update(ctx context.Context, request PermissionsRequest) (*ObjectPermissions, error) { var objectPermissions ObjectPermissions path := fmt.Sprintf("/api/2.0/permissions/%v/%v", request.RequestObjectType, request.RequestObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &objectPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &objectPermissions) return &objectPermissions, err } @@ -400,56 +436,62 @@ type servicePrincipalsImpl struct { func (a *servicePrincipalsImpl) Create(ctx context.Context, request ServicePrincipal) (*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := "/api/2.0/preview/scim/v2/ServicePrincipals" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &servicePrincipal) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, 
&servicePrincipal) return &servicePrincipal, err } func (a *servicePrincipalsImpl) Delete(ctx context.Context, request DeleteServicePrincipalRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *servicePrincipalsImpl) Get(ctx context.Context, request GetServicePrincipalRequest) (*ServicePrincipal, error) { var servicePrincipal ServicePrincipal path := fmt.Sprintf("/api/2.0/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &servicePrincipal) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servicePrincipal) return &servicePrincipal, err } func (a *servicePrincipalsImpl) List(ctx context.Context, request ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { var listServicePrincipalResponse ListServicePrincipalResponse path := "/api/2.0/preview/scim/v2/ServicePrincipals" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listServicePrincipalResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listServicePrincipalResponse) return &listServicePrincipalResponse, err } func (a *servicePrincipalsImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) headers := 
make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err } func (a *servicePrincipalsImpl) Update(ctx context.Context, request ServicePrincipal) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/ServicePrincipals/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } @@ -461,94 +503,104 @@ type usersImpl struct { func (a *usersImpl) Create(ctx context.Context, request User) (*User, error) { var user User path := "/api/2.0/preview/scim/v2/Users" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &user) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &user) return &user, err } func (a *usersImpl) Delete(ctx context.Context, request DeleteUserRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/Users/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *usersImpl) Get(ctx context.Context, request GetUserRequest) (*User, error) { var user User path := fmt.Sprintf("/api/2.0/preview/scim/v2/Users/%v", 
request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &user) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &user) return &user, err } func (a *usersImpl) GetPermissionLevels(ctx context.Context) (*GetPasswordPermissionLevelsResponse, error) { var getPasswordPermissionLevelsResponse GetPasswordPermissionLevelsResponse path := "/api/2.0/permissions/authorization/passwords/permissionLevels" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &getPasswordPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getPasswordPermissionLevelsResponse) return &getPasswordPermissionLevelsResponse, err } func (a *usersImpl) GetPermissions(ctx context.Context) (*PasswordPermissions, error) { var passwordPermissions PasswordPermissions path := "/api/2.0/permissions/authorization/passwords" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &passwordPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &passwordPermissions) return &passwordPermissions, err } func (a *usersImpl) List(ctx context.Context, request ListUsersRequest) (*ListUsersResponse, error) { var listUsersResponse ListUsersResponse path := "/api/2.0/preview/scim/v2/Users" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listUsersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listUsersResponse) return &listUsersResponse, err } func (a *usersImpl) Patch(ctx context.Context, request PartialUpdate) error { var patchResponse PatchResponse path := 
fmt.Sprintf("/api/2.0/preview/scim/v2/Users/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &patchResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err } func (a *usersImpl) SetPermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { var passwordPermissions PasswordPermissions path := "/api/2.0/permissions/authorization/passwords" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &passwordPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &passwordPermissions) return &passwordPermissions, err } func (a *usersImpl) Update(ctx context.Context, request User) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/preview/scim/v2/Users/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } func (a *usersImpl) UpdatePermissions(ctx context.Context, request PasswordPermissionsRequest) (*PasswordPermissions, error) { var passwordPermissions PasswordPermissions path := "/api/2.0/permissions/authorization/passwords" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, 
&passwordPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &passwordPermissions) return &passwordPermissions, err } @@ -560,36 +612,40 @@ type workspaceAssignmentImpl struct { func (a *workspaceAssignmentImpl) Delete(ctx context.Context, request DeleteWorkspaceAssignmentRequest) error { var deleteWorkspacePermissionAssignmentResponse DeleteWorkspacePermissionAssignmentResponse path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/permissionassignments/principals/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.PrincipalId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteWorkspacePermissionAssignmentResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteWorkspacePermissionAssignmentResponse) return err } func (a *workspaceAssignmentImpl) Get(ctx context.Context, request GetWorkspaceAssignmentRequest) (*WorkspacePermissions, error) { var workspacePermissions WorkspacePermissions path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/permissionassignments/permissions", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &workspacePermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspacePermissions) return &workspacePermissions, err } func (a *workspaceAssignmentImpl) List(ctx context.Context, request ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { var permissionAssignments PermissionAssignments path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/permissionassignments", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) headers := 
make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &permissionAssignments) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &permissionAssignments) return &permissionAssignments, err } func (a *workspaceAssignmentImpl) Update(ctx context.Context, request UpdateWorkspaceAssignments) (*PermissionAssignment, error) { var permissionAssignment PermissionAssignment path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/permissionassignments/principals/%v", a.client.ConfiguredAccountID(), request.WorkspaceId, request.PrincipalId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &permissionAssignment) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &permissionAssignment) return &permissionAssignment, err } diff --git a/service/jobs/impl.go b/service/jobs/impl.go index 0929b10c4..d381d8876 100755 --- a/service/jobs/impl.go +++ b/service/jobs/impl.go @@ -18,192 +18,212 @@ type jobsImpl struct { func (a *jobsImpl) CancelAllRuns(ctx context.Context, request CancelAllRuns) error { var cancelAllRunsResponse CancelAllRunsResponse path := "/api/2.1/jobs/runs/cancel-all" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &cancelAllRunsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelAllRunsResponse) return err } func (a *jobsImpl) CancelRun(ctx context.Context, request CancelRun) error { var cancelRunResponse CancelRunResponse path := "/api/2.1/jobs/runs/cancel" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &cancelRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelRunResponse) return err } func (a *jobsImpl) Create(ctx context.Context, request CreateJob) (*CreateResponse, error) { var createResponse CreateResponse path := "/api/2.1/jobs/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createResponse) return &createResponse, err } func (a *jobsImpl) Delete(ctx context.Context, request DeleteJob) error { var deleteResponse DeleteResponse path := "/api/2.1/jobs/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteResponse) return err } func (a *jobsImpl) DeleteRun(ctx context.Context, request DeleteRun) error { var deleteRunResponse DeleteRunResponse path := "/api/2.1/jobs/runs/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteRunResponse) return err } func (a *jobsImpl) ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) { var exportRunOutput ExportRunOutput path := "/api/2.1/jobs/runs/export" + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &exportRunOutput) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &exportRunOutput) return &exportRunOutput, err } func (a *jobsImpl) Get(ctx context.Context, request GetJobRequest) (*Job, error) { var job Job path := "/api/2.1/jobs/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &job) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &job) return &job, err } func (a *jobsImpl) GetPermissionLevels(ctx context.Context, request GetJobPermissionLevelsRequest) (*GetJobPermissionLevelsResponse, error) { var getJobPermissionLevelsResponse GetJobPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/jobs/%v/permissionLevels", request.JobId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getJobPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getJobPermissionLevelsResponse) return &getJobPermissionLevelsResponse, err } func (a *jobsImpl) GetPermissions(ctx context.Context, request GetJobPermissionsRequest) (*JobPermissions, error) { var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0/permissions/jobs/%v", request.JobId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &jobPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &jobPermissions) return &jobPermissions, err } func (a *jobsImpl) GetRun(ctx context.Context, request GetRunRequest) (*Run, 
error) { var run Run path := "/api/2.1/jobs/runs/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &run) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &run) return &run, err } func (a *jobsImpl) GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) { var runOutput RunOutput path := "/api/2.1/jobs/runs/get-output" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &runOutput) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &runOutput) return &runOutput, err } func (a *jobsImpl) List(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) { var listJobsResponse ListJobsResponse path := "/api/2.1/jobs/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listJobsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listJobsResponse) return &listJobsResponse, err } func (a *jobsImpl) ListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) { var listRunsResponse ListRunsResponse path := "/api/2.1/jobs/runs/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listRunsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRunsResponse) return &listRunsResponse, err } func (a *jobsImpl) RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error) { var repairRunResponse RepairRunResponse path := "/api/2.1/jobs/runs/repair" + queryParams 
:= make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &repairRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &repairRunResponse) return &repairRunResponse, err } func (a *jobsImpl) Reset(ctx context.Context, request ResetJob) error { var resetResponse ResetResponse path := "/api/2.1/jobs/reset" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &resetResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &resetResponse) return err } func (a *jobsImpl) RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error) { var runNowResponse RunNowResponse path := "/api/2.1/jobs/run-now" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &runNowResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &runNowResponse) return &runNowResponse, err } func (a *jobsImpl) SetPermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) { var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0/permissions/jobs/%v", request.JobId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &jobPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &jobPermissions) return &jobPermissions, err } func (a *jobsImpl) Submit(ctx context.Context, 
request SubmitRun) (*SubmitRunResponse, error) { var submitRunResponse SubmitRunResponse path := "/api/2.1/jobs/runs/submit" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &submitRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &submitRunResponse) return &submitRunResponse, err } func (a *jobsImpl) Update(ctx context.Context, request UpdateJob) error { var updateResponse UpdateResponse path := "/api/2.1/jobs/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateResponse) return err } func (a *jobsImpl) UpdatePermissions(ctx context.Context, request JobPermissionsRequest) (*JobPermissions, error) { var jobPermissions JobPermissions path := fmt.Sprintf("/api/2.0/permissions/jobs/%v", request.JobId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &jobPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &jobPermissions) return &jobPermissions, err } @@ -215,27 +235,30 @@ type policyComplianceForJobsImpl struct { func (a *policyComplianceForJobsImpl) EnforceCompliance(ctx context.Context, request EnforcePolicyComplianceRequest) (*EnforcePolicyComplianceResponse, error) { var enforcePolicyComplianceResponse EnforcePolicyComplianceResponse path := "/api/2.0/policies/jobs/enforce-compliance" + queryParams := make(map[string]any) headers := make(map[string]string) 
headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &enforcePolicyComplianceResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &enforcePolicyComplianceResponse) return &enforcePolicyComplianceResponse, err } func (a *policyComplianceForJobsImpl) GetCompliance(ctx context.Context, request GetPolicyComplianceRequest) (*GetPolicyComplianceResponse, error) { var getPolicyComplianceResponse GetPolicyComplianceResponse path := "/api/2.0/policies/jobs/get-compliance" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPolicyComplianceResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPolicyComplianceResponse) return &getPolicyComplianceResponse, err } func (a *policyComplianceForJobsImpl) ListCompliance(ctx context.Context, request ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { var listJobComplianceForPolicyResponse ListJobComplianceForPolicyResponse path := "/api/2.0/policies/jobs/list-compliance" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listJobComplianceForPolicyResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listJobComplianceForPolicyResponse) return &listJobComplianceForPolicyResponse, err } diff --git a/service/marketplace/impl.go b/service/marketplace/impl.go index 81072eec0..278da15c6 100755 --- a/service/marketplace/impl.go +++ b/service/marketplace/impl.go @@ -18,18 +18,20 @@ type consumerFulfillmentsImpl struct { func (a *consumerFulfillmentsImpl) Get(ctx context.Context, request GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) 
{ var getListingContentMetadataResponse GetListingContentMetadataResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/content", request.ListingId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getListingContentMetadataResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingContentMetadataResponse) return &getListingContentMetadataResponse, err } func (a *consumerFulfillmentsImpl) List(ctx context.Context, request ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { var listFulfillmentsResponse ListFulfillmentsResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/fulfillments", request.ListingId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listFulfillmentsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFulfillmentsResponse) return &listFulfillmentsResponse, err } @@ -41,47 +43,52 @@ type consumerInstallationsImpl struct { func (a *consumerInstallationsImpl) Create(ctx context.Context, request CreateInstallationRequest) (*Installation, error) { var installation Installation path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/installations", request.ListingId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &installation) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &installation) return &installation, err } func (a *consumerInstallationsImpl) Delete(ctx context.Context, request DeleteInstallationRequest) error { var deleteInstallationResponse 
DeleteInstallationResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/installations/%v", request.ListingId, request.InstallationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteInstallationResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteInstallationResponse) return err } func (a *consumerInstallationsImpl) List(ctx context.Context, request ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { var listAllInstallationsResponse ListAllInstallationsResponse path := "/api/2.1/marketplace-consumer/installations" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAllInstallationsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAllInstallationsResponse) return &listAllInstallationsResponse, err } func (a *consumerInstallationsImpl) ListListingInstallations(ctx context.Context, request ListInstallationsRequest) (*ListInstallationsResponse, error) { var listInstallationsResponse ListInstallationsResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/installations", request.ListingId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listInstallationsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listInstallationsResponse) return &listInstallationsResponse, err } func (a *consumerInstallationsImpl) Update(ctx context.Context, request UpdateInstallationRequest) (*UpdateInstallationResponse, error) { var updateInstallationResponse UpdateInstallationResponse path := 
fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/installations/%v", request.ListingId, request.InstallationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateInstallationResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateInstallationResponse) return &updateInstallationResponse, err } @@ -93,36 +100,40 @@ type consumerListingsImpl struct { func (a *consumerListingsImpl) BatchGet(ctx context.Context, request BatchGetListingsRequest) (*BatchGetListingsResponse, error) { var batchGetListingsResponse BatchGetListingsResponse path := "/api/2.1/marketplace-consumer/listings:batchGet" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &batchGetListingsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &batchGetListingsResponse) return &batchGetListingsResponse, err } func (a *consumerListingsImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { var getListingResponse GetListingResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getListingResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingResponse) return &getListingResponse, err } func (a *consumerListingsImpl) List(ctx context.Context, request ListListingsRequest) (*ListListingsResponse, error) { var listListingsResponse ListListingsResponse path := "/api/2.1/marketplace-consumer/listings" + queryParams := make(map[string]any) headers := 
make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listListingsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listListingsResponse) return &listListingsResponse, err } func (a *consumerListingsImpl) Search(ctx context.Context, request SearchListingsRequest) (*SearchListingsResponse, error) { var searchListingsResponse SearchListingsResponse path := "/api/2.1/marketplace-consumer/search-listings" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &searchListingsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &searchListingsResponse) return &searchListingsResponse, err } @@ -134,28 +145,31 @@ type consumerPersonalizationRequestsImpl struct { func (a *consumerPersonalizationRequestsImpl) Create(ctx context.Context, request CreatePersonalizationRequest) (*CreatePersonalizationRequestResponse, error) { var createPersonalizationRequestResponse CreatePersonalizationRequestResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/personalization-requests", request.ListingId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createPersonalizationRequestResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPersonalizationRequestResponse) return &createPersonalizationRequestResponse, err } func (a *consumerPersonalizationRequestsImpl) Get(ctx context.Context, request GetPersonalizationRequestRequest) (*GetPersonalizationRequestResponse, error) { var getPersonalizationRequestResponse GetPersonalizationRequestResponse path := 
fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/personalization-requests", request.ListingId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPersonalizationRequestResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPersonalizationRequestResponse) return &getPersonalizationRequestResponse, err } func (a *consumerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse path := "/api/2.1/marketplace-consumer/personalization-requests" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAllPersonalizationRequestsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAllPersonalizationRequestsResponse) return &listAllPersonalizationRequestsResponse, err } @@ -167,27 +181,30 @@ type consumerProvidersImpl struct { func (a *consumerProvidersImpl) BatchGet(ctx context.Context, request BatchGetProvidersRequest) (*BatchGetProvidersResponse, error) { var batchGetProvidersResponse BatchGetProvidersResponse path := "/api/2.1/marketplace-consumer/providers:batchGet" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &batchGetProvidersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &batchGetProvidersResponse) return &batchGetProvidersResponse, err } func (a *consumerProvidersImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { var getProviderResponse 
GetProviderResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/providers/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getProviderResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getProviderResponse) return &getProviderResponse, err } func (a *consumerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.1/marketplace-consumer/providers" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listProvidersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProvidersResponse) return &listProvidersResponse, err } @@ -199,38 +216,42 @@ type providerExchangeFiltersImpl struct { func (a *providerExchangeFiltersImpl) Create(ctx context.Context, request CreateExchangeFilterRequest) (*CreateExchangeFilterResponse, error) { var createExchangeFilterResponse CreateExchangeFilterResponse path := "/api/2.0/marketplace-exchange/filters" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createExchangeFilterResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createExchangeFilterResponse) return &createExchangeFilterResponse, err } func (a *providerExchangeFiltersImpl) Delete(ctx context.Context, request DeleteExchangeFilterRequest) error { var deleteExchangeFilterResponse DeleteExchangeFilterResponse path := fmt.Sprintf("/api/2.0/marketplace-exchange/filters/%v", request.Id) + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteExchangeFilterResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteExchangeFilterResponse) return err } func (a *providerExchangeFiltersImpl) List(ctx context.Context, request ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { var listExchangeFiltersResponse ListExchangeFiltersResponse path := "/api/2.0/marketplace-exchange/filters" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listExchangeFiltersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExchangeFiltersResponse) return &listExchangeFiltersResponse, err } func (a *providerExchangeFiltersImpl) Update(ctx context.Context, request UpdateExchangeFilterRequest) (*UpdateExchangeFilterResponse, error) { var updateExchangeFilterResponse UpdateExchangeFilterResponse path := fmt.Sprintf("/api/2.0/marketplace-exchange/filters/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateExchangeFilterResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateExchangeFilterResponse) return &updateExchangeFilterResponse, err } @@ -242,84 +263,93 @@ type providerExchangesImpl struct { func (a *providerExchangesImpl) AddListingToExchange(ctx context.Context, request AddExchangeForListingRequest) (*AddExchangeForListingResponse, error) { var addExchangeForListingResponse AddExchangeForListingResponse path := "/api/2.0/marketplace-exchange/exchanges-for-listing" + queryParams := make(map[string]any) 
headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &addExchangeForListingResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &addExchangeForListingResponse) return &addExchangeForListingResponse, err } func (a *providerExchangesImpl) Create(ctx context.Context, request CreateExchangeRequest) (*CreateExchangeResponse, error) { var createExchangeResponse CreateExchangeResponse path := "/api/2.0/marketplace-exchange/exchanges" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createExchangeResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createExchangeResponse) return &createExchangeResponse, err } func (a *providerExchangesImpl) Delete(ctx context.Context, request DeleteExchangeRequest) error { var deleteExchangeResponse DeleteExchangeResponse path := fmt.Sprintf("/api/2.0/marketplace-exchange/exchanges/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteExchangeResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteExchangeResponse) return err } func (a *providerExchangesImpl) DeleteListingFromExchange(ctx context.Context, request RemoveExchangeForListingRequest) error { var removeExchangeForListingResponse RemoveExchangeForListingResponse path := fmt.Sprintf("/api/2.0/marketplace-exchange/exchanges-for-listing/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, 
headers, request, &removeExchangeForListingResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &removeExchangeForListingResponse) return err } func (a *providerExchangesImpl) Get(ctx context.Context, request GetExchangeRequest) (*GetExchangeResponse, error) { var getExchangeResponse GetExchangeResponse path := fmt.Sprintf("/api/2.0/marketplace-exchange/exchanges/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getExchangeResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExchangeResponse) return &getExchangeResponse, err } func (a *providerExchangesImpl) List(ctx context.Context, request ListExchangesRequest) (*ListExchangesResponse, error) { var listExchangesResponse ListExchangesResponse path := "/api/2.0/marketplace-exchange/exchanges" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listExchangesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExchangesResponse) return &listExchangesResponse, err } func (a *providerExchangesImpl) ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { var listExchangesForListingResponse ListExchangesForListingResponse path := "/api/2.0/marketplace-exchange/exchanges-for-listing" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listExchangesForListingResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExchangesForListingResponse) return &listExchangesForListingResponse, err } func (a 
*providerExchangesImpl) ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { var listListingsForExchangeResponse ListListingsForExchangeResponse path := "/api/2.0/marketplace-exchange/listings-for-exchange" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listListingsForExchangeResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listListingsForExchangeResponse) return &listListingsForExchangeResponse, err } func (a *providerExchangesImpl) Update(ctx context.Context, request UpdateExchangeRequest) (*UpdateExchangeResponse, error) { var updateExchangeResponse UpdateExchangeResponse path := fmt.Sprintf("/api/2.0/marketplace-exchange/exchanges/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateExchangeResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateExchangeResponse) return &updateExchangeResponse, err } @@ -331,37 +361,41 @@ type providerFilesImpl struct { func (a *providerFilesImpl) Create(ctx context.Context, request CreateFileRequest) (*CreateFileResponse, error) { var createFileResponse CreateFileResponse path := "/api/2.0/marketplace-provider/files" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createFileResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createFileResponse) return &createFileResponse, err } func (a *providerFilesImpl) Delete(ctx context.Context, request 
DeleteFileRequest) error { var deleteFileResponse DeleteFileResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/files/%v", request.FileId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteFileResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteFileResponse) return err } func (a *providerFilesImpl) Get(ctx context.Context, request GetFileRequest) (*GetFileResponse, error) { var getFileResponse GetFileResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/files/%v", request.FileId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getFileResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getFileResponse) return &getFileResponse, err } func (a *providerFilesImpl) List(ctx context.Context, request ListFilesRequest) (*ListFilesResponse, error) { var listFilesResponse ListFilesResponse path := "/api/2.0/marketplace-provider/files" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listFilesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFilesResponse) return &listFilesResponse, err } @@ -373,47 +407,52 @@ type providerListingsImpl struct { func (a *providerListingsImpl) Create(ctx context.Context, request CreateListingRequest) (*CreateListingResponse, error) { var createListingResponse CreateListingResponse path := "/api/2.0/marketplace-provider/listing" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, 
http.MethodPost, path, headers, request, &createListingResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createListingResponse) return &createListingResponse, err } func (a *providerListingsImpl) Delete(ctx context.Context, request DeleteListingRequest) error { var deleteListingResponse DeleteListingResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/listings/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteListingResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteListingResponse) return err } func (a *providerListingsImpl) Get(ctx context.Context, request GetListingRequest) (*GetListingResponse, error) { var getListingResponse GetListingResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/listings/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getListingResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingResponse) return &getListingResponse, err } func (a *providerListingsImpl) List(ctx context.Context, request GetListingsRequest) (*GetListingsResponse, error) { var getListingsResponse GetListingsResponse path := "/api/2.0/marketplace-provider/listings" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getListingsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getListingsResponse) return &getListingsResponse, err } func (a *providerListingsImpl) Update(ctx context.Context, request UpdateListingRequest) (*UpdateListingResponse, error) { var 
updateListingResponse UpdateListingResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/listings/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateListingResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateListingResponse) return &updateListingResponse, err } @@ -425,19 +464,21 @@ type providerPersonalizationRequestsImpl struct { func (a *providerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse path := "/api/2.0/marketplace-provider/personalization-requests" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAllPersonalizationRequestsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAllPersonalizationRequestsResponse) return &listAllPersonalizationRequestsResponse, err } func (a *providerPersonalizationRequestsImpl) Update(ctx context.Context, request UpdatePersonalizationRequestRequest) (*UpdatePersonalizationRequestResponse, error) { var updatePersonalizationRequestResponse UpdatePersonalizationRequestResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/listings/%v/personalization-requests/%v/request-status", request.ListingId, request.RequestId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updatePersonalizationRequestResponse) + err := a.client.Do(ctx, http.MethodPut, path, 
headers, queryParams, request, &updatePersonalizationRequestResponse) return &updatePersonalizationRequestResponse, err } @@ -449,37 +490,41 @@ type providerProviderAnalyticsDashboardsImpl struct { func (a *providerProviderAnalyticsDashboardsImpl) Create(ctx context.Context) (*ProviderAnalyticsDashboard, error) { var providerAnalyticsDashboard ProviderAnalyticsDashboard path := "/api/2.0/marketplace-provider/analytics_dashboard" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &providerAnalyticsDashboard) + err := a.client.Do(ctx, http.MethodPost, path, headers, nil, nil, &providerAnalyticsDashboard) return &providerAnalyticsDashboard, err } func (a *providerProviderAnalyticsDashboardsImpl) Get(ctx context.Context) (*ListProviderAnalyticsDashboardResponse, error) { var listProviderAnalyticsDashboardResponse ListProviderAnalyticsDashboardResponse path := "/api/2.0/marketplace-provider/analytics_dashboard" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listProviderAnalyticsDashboardResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listProviderAnalyticsDashboardResponse) return &listProviderAnalyticsDashboardResponse, err } func (a *providerProviderAnalyticsDashboardsImpl) GetLatestVersion(ctx context.Context) (*GetLatestVersionProviderAnalyticsDashboardResponse, error) { var getLatestVersionProviderAnalyticsDashboardResponse GetLatestVersionProviderAnalyticsDashboardResponse path := "/api/2.0/marketplace-provider/analytics_dashboard/latest" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &getLatestVersionProviderAnalyticsDashboardResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getLatestVersionProviderAnalyticsDashboardResponse) return 
&getLatestVersionProviderAnalyticsDashboardResponse, err } func (a *providerProviderAnalyticsDashboardsImpl) Update(ctx context.Context, request UpdateProviderAnalyticsDashboardRequest) (*UpdateProviderAnalyticsDashboardResponse, error) { var updateProviderAnalyticsDashboardResponse UpdateProviderAnalyticsDashboardResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/analytics_dashboard/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateProviderAnalyticsDashboardResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateProviderAnalyticsDashboardResponse) return &updateProviderAnalyticsDashboardResponse, err } @@ -491,46 +536,51 @@ type providerProvidersImpl struct { func (a *providerProvidersImpl) Create(ctx context.Context, request CreateProviderRequest) (*CreateProviderResponse, error) { var createProviderResponse CreateProviderResponse path := "/api/2.0/marketplace-provider/provider" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createProviderResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createProviderResponse) return &createProviderResponse, err } func (a *providerProvidersImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { var deleteProviderResponse DeleteProviderResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/providers/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteProviderResponse) + err := a.client.Do(ctx, 
http.MethodDelete, path, headers, queryParams, request, &deleteProviderResponse) return err } func (a *providerProvidersImpl) Get(ctx context.Context, request GetProviderRequest) (*GetProviderResponse, error) { var getProviderResponse GetProviderResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/providers/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getProviderResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getProviderResponse) return &getProviderResponse, err } func (a *providerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.0/marketplace-provider/providers" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listProvidersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProvidersResponse) return &listProvidersResponse, err } func (a *providerProvidersImpl) Update(ctx context.Context, request UpdateProviderRequest) (*UpdateProviderResponse, error) { var updateProviderResponse UpdateProviderResponse path := fmt.Sprintf("/api/2.0/marketplace-provider/providers/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateProviderResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateProviderResponse) return &updateProviderResponse, err } diff --git a/service/ml/impl.go b/service/ml/impl.go index faf6b0f12..3141be45a 100755 --- a/service/ml/impl.go +++ 
b/service/ml/impl.go @@ -18,292 +18,322 @@ type experimentsImpl struct { func (a *experimentsImpl) CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error) { var createExperimentResponse CreateExperimentResponse path := "/api/2.0/mlflow/experiments/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createExperimentResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createExperimentResponse) return &createExperimentResponse, err } func (a *experimentsImpl) CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error) { var createRunResponse CreateRunResponse path := "/api/2.0/mlflow/runs/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createRunResponse) return &createRunResponse, err } func (a *experimentsImpl) DeleteExperiment(ctx context.Context, request DeleteExperiment) error { var deleteExperimentResponse DeleteExperimentResponse path := "/api/2.0/mlflow/experiments/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteExperimentResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteExperimentResponse) return err } func (a *experimentsImpl) DeleteRun(ctx context.Context, request DeleteRun) error { var deleteRunResponse DeleteRunResponse path := "/api/2.0/mlflow/runs/delete" + 
queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteRunResponse) return err } func (a *experimentsImpl) DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error) { var deleteRunsResponse DeleteRunsResponse path := "/api/2.0/mlflow/databricks/runs/delete-runs" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteRunsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteRunsResponse) return &deleteRunsResponse, err } func (a *experimentsImpl) DeleteTag(ctx context.Context, request DeleteTag) error { var deleteTagResponse DeleteTagResponse path := "/api/2.0/mlflow/runs/delete-tag" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteTagResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteTagResponse) return err } func (a *experimentsImpl) GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) { var getExperimentResponse GetExperimentResponse path := "/api/2.0/mlflow/experiments/get-by-name" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getExperimentResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentResponse) return 
&getExperimentResponse, err } func (a *experimentsImpl) GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) { var getExperimentResponse GetExperimentResponse path := "/api/2.0/mlflow/experiments/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getExperimentResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentResponse) return &getExperimentResponse, err } func (a *experimentsImpl) GetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error) { var getMetricHistoryResponse GetMetricHistoryResponse path := "/api/2.0/mlflow/metrics/get-history" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getMetricHistoryResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getMetricHistoryResponse) return &getMetricHistoryResponse, err } func (a *experimentsImpl) GetPermissionLevels(ctx context.Context, request GetExperimentPermissionLevelsRequest) (*GetExperimentPermissionLevelsResponse, error) { var getExperimentPermissionLevelsResponse GetExperimentPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/experiments/%v/permissionLevels", request.ExperimentId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getExperimentPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentPermissionLevelsResponse) return &getExperimentPermissionLevelsResponse, err } func (a *experimentsImpl) GetPermissions(ctx context.Context, request GetExperimentPermissionsRequest) 
(*ExperimentPermissions, error) { var experimentPermissions ExperimentPermissions path := fmt.Sprintf("/api/2.0/permissions/experiments/%v", request.ExperimentId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &experimentPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &experimentPermissions) return &experimentPermissions, err } func (a *experimentsImpl) GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error) { var getRunResponse GetRunResponse path := "/api/2.0/mlflow/runs/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getRunResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRunResponse) return &getRunResponse, err } func (a *experimentsImpl) ListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error) { var listArtifactsResponse ListArtifactsResponse path := "/api/2.0/mlflow/artifacts/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listArtifactsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listArtifactsResponse) return &listArtifactsResponse, err } func (a *experimentsImpl) ListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) { var listExperimentsResponse ListExperimentsResponse path := "/api/2.0/mlflow/experiments/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listExperimentsResponse) + err := 
a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listExperimentsResponse) return &listExperimentsResponse, err } func (a *experimentsImpl) LogBatch(ctx context.Context, request LogBatch) error { var logBatchResponse LogBatchResponse path := "/api/2.0/mlflow/runs/log-batch" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &logBatchResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logBatchResponse) return err } func (a *experimentsImpl) LogInputs(ctx context.Context, request LogInputs) error { var logInputsResponse LogInputsResponse path := "/api/2.0/mlflow/runs/log-inputs" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &logInputsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logInputsResponse) return err } func (a *experimentsImpl) LogMetric(ctx context.Context, request LogMetric) error { var logMetricResponse LogMetricResponse path := "/api/2.0/mlflow/runs/log-metric" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &logMetricResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logMetricResponse) return err } func (a *experimentsImpl) LogModel(ctx context.Context, request LogModel) error { var logModelResponse LogModelResponse path := "/api/2.0/mlflow/runs/log-model" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = 
"application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &logModelResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logModelResponse) return err } func (a *experimentsImpl) LogParam(ctx context.Context, request LogParam) error { var logParamResponse LogParamResponse path := "/api/2.0/mlflow/runs/log-parameter" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &logParamResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &logParamResponse) return err } func (a *experimentsImpl) RestoreExperiment(ctx context.Context, request RestoreExperiment) error { var restoreExperimentResponse RestoreExperimentResponse path := "/api/2.0/mlflow/experiments/restore" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &restoreExperimentResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &restoreExperimentResponse) return err } func (a *experimentsImpl) RestoreRun(ctx context.Context, request RestoreRun) error { var restoreRunResponse RestoreRunResponse path := "/api/2.0/mlflow/runs/restore" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &restoreRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &restoreRunResponse) return err } func (a *experimentsImpl) RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error) { var restoreRunsResponse RestoreRunsResponse path 
:= "/api/2.0/mlflow/databricks/runs/restore-runs" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &restoreRunsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &restoreRunsResponse) return &restoreRunsResponse, err } func (a *experimentsImpl) SearchExperiments(ctx context.Context, request SearchExperiments) (*SearchExperimentsResponse, error) { var searchExperimentsResponse SearchExperimentsResponse path := "/api/2.0/mlflow/experiments/search" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &searchExperimentsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &searchExperimentsResponse) return &searchExperimentsResponse, err } func (a *experimentsImpl) SearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error) { var searchRunsResponse SearchRunsResponse path := "/api/2.0/mlflow/runs/search" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &searchRunsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &searchRunsResponse) return &searchRunsResponse, err } func (a *experimentsImpl) SetExperimentTag(ctx context.Context, request SetExperimentTag) error { var setExperimentTagResponse SetExperimentTagResponse path := "/api/2.0/mlflow/experiments/set-experiment-tag" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" 
- err := a.client.Do(ctx, http.MethodPost, path, headers, request, &setExperimentTagResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setExperimentTagResponse) return err } func (a *experimentsImpl) SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { var experimentPermissions ExperimentPermissions path := fmt.Sprintf("/api/2.0/permissions/experiments/%v", request.ExperimentId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &experimentPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &experimentPermissions) return &experimentPermissions, err } func (a *experimentsImpl) SetTag(ctx context.Context, request SetTag) error { var setTagResponse SetTagResponse path := "/api/2.0/mlflow/runs/set-tag" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &setTagResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setTagResponse) return err } func (a *experimentsImpl) UpdateExperiment(ctx context.Context, request UpdateExperiment) error { var updateExperimentResponse UpdateExperimentResponse path := "/api/2.0/mlflow/experiments/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &updateExperimentResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateExperimentResponse) return err } func (a *experimentsImpl) UpdatePermissions(ctx context.Context, 
request ExperimentPermissionsRequest) (*ExperimentPermissions, error) { var experimentPermissions ExperimentPermissions path := fmt.Sprintf("/api/2.0/permissions/experiments/%v", request.ExperimentId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &experimentPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &experimentPermissions) return &experimentPermissions, err } func (a *experimentsImpl) UpdateRun(ctx context.Context, request UpdateRun) (*UpdateRunResponse, error) { var updateRunResponse UpdateRunResponse path := "/api/2.0/mlflow/runs/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &updateRunResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateRunResponse) return &updateRunResponse, err } @@ -315,342 +345,378 @@ type modelRegistryImpl struct { func (a *modelRegistryImpl) ApproveTransitionRequest(ctx context.Context, request ApproveTransitionRequest) (*ApproveTransitionRequestResponse, error) { var approveTransitionRequestResponse ApproveTransitionRequestResponse path := "/api/2.0/mlflow/transition-requests/approve" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &approveTransitionRequestResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &approveTransitionRequestResponse) return &approveTransitionRequestResponse, err } func (a *modelRegistryImpl) CreateComment(ctx context.Context, request CreateComment) 
(*CreateCommentResponse, error) { var createCommentResponse CreateCommentResponse path := "/api/2.0/mlflow/comments/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createCommentResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createCommentResponse) return &createCommentResponse, err } func (a *modelRegistryImpl) CreateModel(ctx context.Context, request CreateModelRequest) (*CreateModelResponse, error) { var createModelResponse CreateModelResponse path := "/api/2.0/mlflow/registered-models/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createModelResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createModelResponse) return &createModelResponse, err } func (a *modelRegistryImpl) CreateModelVersion(ctx context.Context, request CreateModelVersionRequest) (*CreateModelVersionResponse, error) { var createModelVersionResponse CreateModelVersionResponse path := "/api/2.0/mlflow/model-versions/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createModelVersionResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createModelVersionResponse) return &createModelVersionResponse, err } func (a *modelRegistryImpl) CreateTransitionRequest(ctx context.Context, request CreateTransitionRequest) (*CreateTransitionRequestResponse, error) { var createTransitionRequestResponse CreateTransitionRequestResponse path := 
"/api/2.0/mlflow/transition-requests/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createTransitionRequestResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createTransitionRequestResponse) return &createTransitionRequestResponse, err } func (a *modelRegistryImpl) CreateWebhook(ctx context.Context, request CreateRegistryWebhook) (*CreateWebhookResponse, error) { var createWebhookResponse CreateWebhookResponse path := "/api/2.0/mlflow/registry-webhooks/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createWebhookResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createWebhookResponse) return &createWebhookResponse, err } func (a *modelRegistryImpl) DeleteComment(ctx context.Context, request DeleteCommentRequest) error { var deleteCommentResponse DeleteCommentResponse path := "/api/2.0/mlflow/comments/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteCommentResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCommentResponse) return err } func (a *modelRegistryImpl) DeleteModel(ctx context.Context, request DeleteModelRequest) error { var deleteModelResponse DeleteModelResponse path := "/api/2.0/mlflow/registered-models/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteModelResponse) + err := 
a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteModelResponse) return err } func (a *modelRegistryImpl) DeleteModelTag(ctx context.Context, request DeleteModelTagRequest) error { var deleteModelTagResponse DeleteModelTagResponse path := "/api/2.0/mlflow/registered-models/delete-tag" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteModelTagResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteModelTagResponse) return err } func (a *modelRegistryImpl) DeleteModelVersion(ctx context.Context, request DeleteModelVersionRequest) error { var deleteModelVersionResponse DeleteModelVersionResponse path := "/api/2.0/mlflow/model-versions/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteModelVersionResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteModelVersionResponse) return err } func (a *modelRegistryImpl) DeleteModelVersionTag(ctx context.Context, request DeleteModelVersionTagRequest) error { var deleteModelVersionTagResponse DeleteModelVersionTagResponse path := "/api/2.0/mlflow/model-versions/delete-tag" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteModelVersionTagResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteModelVersionTagResponse) return err } func (a *modelRegistryImpl) DeleteTransitionRequest(ctx context.Context, request DeleteTransitionRequestRequest) error { var deleteTransitionRequestResponse DeleteTransitionRequestResponse path := 
"/api/2.0/mlflow/transition-requests/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteTransitionRequestResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteTransitionRequestResponse) return err } func (a *modelRegistryImpl) DeleteWebhook(ctx context.Context, request DeleteWebhookRequest) error { var deleteWebhookResponse DeleteWebhookResponse path := "/api/2.0/mlflow/registry-webhooks/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteWebhookResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteWebhookResponse) return err } func (a *modelRegistryImpl) GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { var getLatestVersionsResponse GetLatestVersionsResponse path := "/api/2.0/mlflow/registered-models/get-latest-versions" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &getLatestVersionsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &getLatestVersionsResponse) return &getLatestVersionsResponse, err } func (a *modelRegistryImpl) GetModel(ctx context.Context, request GetModelRequest) (*GetModelResponse, error) { var getModelResponse GetModelResponse path := "/api/2.0/mlflow/databricks/registered-models/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getModelResponse) + err := 
a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getModelResponse) return &getModelResponse, err } func (a *modelRegistryImpl) GetModelVersion(ctx context.Context, request GetModelVersionRequest) (*GetModelVersionResponse, error) { var getModelVersionResponse GetModelVersionResponse path := "/api/2.0/mlflow/model-versions/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getModelVersionResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getModelVersionResponse) return &getModelVersionResponse, err } func (a *modelRegistryImpl) GetModelVersionDownloadUri(ctx context.Context, request GetModelVersionDownloadUriRequest) (*GetModelVersionDownloadUriResponse, error) { var getModelVersionDownloadUriResponse GetModelVersionDownloadUriResponse path := "/api/2.0/mlflow/model-versions/get-download-uri" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getModelVersionDownloadUriResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getModelVersionDownloadUriResponse) return &getModelVersionDownloadUriResponse, err } func (a *modelRegistryImpl) GetPermissionLevels(ctx context.Context, request GetRegisteredModelPermissionLevelsRequest) (*GetRegisteredModelPermissionLevelsResponse, error) { var getRegisteredModelPermissionLevelsResponse GetRegisteredModelPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/registered-models/%v/permissionLevels", request.RegisteredModelId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getRegisteredModelPermissionLevelsResponse) + err := 
a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRegisteredModelPermissionLevelsResponse) return &getRegisteredModelPermissionLevelsResponse, err } func (a *modelRegistryImpl) GetPermissions(ctx context.Context, request GetRegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { var registeredModelPermissions RegisteredModelPermissions path := fmt.Sprintf("/api/2.0/permissions/registered-models/%v", request.RegisteredModelId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, ®isteredModelPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, ®isteredModelPermissions) return ®isteredModelPermissions, err } func (a *modelRegistryImpl) ListModels(ctx context.Context, request ListModelsRequest) (*ListModelsResponse, error) { var listModelsResponse ListModelsResponse path := "/api/2.0/mlflow/registered-models/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listModelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listModelsResponse) return &listModelsResponse, err } func (a *modelRegistryImpl) ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { var listTransitionRequestsResponse ListTransitionRequestsResponse path := "/api/2.0/mlflow/transition-requests/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listTransitionRequestsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTransitionRequestsResponse) return &listTransitionRequestsResponse, err 
} func (a *modelRegistryImpl) ListWebhooks(ctx context.Context, request ListWebhooksRequest) (*ListRegistryWebhooks, error) { var listRegistryWebhooks ListRegistryWebhooks path := "/api/2.0/mlflow/registry-webhooks/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listRegistryWebhooks) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRegistryWebhooks) return &listRegistryWebhooks, err } func (a *modelRegistryImpl) RejectTransitionRequest(ctx context.Context, request RejectTransitionRequest) (*RejectTransitionRequestResponse, error) { var rejectTransitionRequestResponse RejectTransitionRequestResponse path := "/api/2.0/mlflow/transition-requests/reject" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &rejectTransitionRequestResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &rejectTransitionRequestResponse) return &rejectTransitionRequestResponse, err } func (a *modelRegistryImpl) RenameModel(ctx context.Context, request RenameModelRequest) (*RenameModelResponse, error) { var renameModelResponse RenameModelResponse path := "/api/2.0/mlflow/registered-models/rename" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &renameModelResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &renameModelResponse) return &renameModelResponse, err } func (a *modelRegistryImpl) SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { var 
searchModelVersionsResponse SearchModelVersionsResponse path := "/api/2.0/mlflow/model-versions/search" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &searchModelVersionsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &searchModelVersionsResponse) return &searchModelVersionsResponse, err } func (a *modelRegistryImpl) SearchModels(ctx context.Context, request SearchModelsRequest) (*SearchModelsResponse, error) { var searchModelsResponse SearchModelsResponse path := "/api/2.0/mlflow/registered-models/search" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &searchModelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &searchModelsResponse) return &searchModelsResponse, err } func (a *modelRegistryImpl) SetModelTag(ctx context.Context, request SetModelTagRequest) error { var setModelTagResponse SetModelTagResponse path := "/api/2.0/mlflow/registered-models/set-tag" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &setModelTagResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setModelTagResponse) return err } func (a *modelRegistryImpl) SetModelVersionTag(ctx context.Context, request SetModelVersionTagRequest) error { var setModelVersionTagResponse SetModelVersionTagResponse path := "/api/2.0/mlflow/model-versions/set-tag" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, 
path, headers, request, &setModelVersionTagResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setModelVersionTagResponse) return err } func (a *modelRegistryImpl) SetPermissions(ctx context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { var registeredModelPermissions RegisteredModelPermissions path := fmt.Sprintf("/api/2.0/permissions/registered-models/%v", request.RegisteredModelId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, ®isteredModelPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, ®isteredModelPermissions) return ®isteredModelPermissions, err } func (a *modelRegistryImpl) TestRegistryWebhook(ctx context.Context, request TestRegistryWebhookRequest) (*TestRegistryWebhookResponse, error) { var testRegistryWebhookResponse TestRegistryWebhookResponse path := "/api/2.0/mlflow/registry-webhooks/test" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &testRegistryWebhookResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &testRegistryWebhookResponse) return &testRegistryWebhookResponse, err } func (a *modelRegistryImpl) TransitionStage(ctx context.Context, request TransitionModelVersionStageDatabricks) (*TransitionStageResponse, error) { var transitionStageResponse TransitionStageResponse path := "/api/2.0/mlflow/databricks/model-versions/transition-stage" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, 
headers, request, &transitionStageResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &transitionStageResponse) return &transitionStageResponse, err } func (a *modelRegistryImpl) UpdateComment(ctx context.Context, request UpdateComment) (*UpdateCommentResponse, error) { var updateCommentResponse UpdateCommentResponse path := "/api/2.0/mlflow/comments/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateCommentResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateCommentResponse) return &updateCommentResponse, err } func (a *modelRegistryImpl) UpdateModel(ctx context.Context, request UpdateModelRequest) error { var updateModelResponse UpdateModelResponse path := "/api/2.0/mlflow/registered-models/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateModelResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateModelResponse) return err } func (a *modelRegistryImpl) UpdateModelVersion(ctx context.Context, request UpdateModelVersionRequest) error { var updateModelVersionResponse UpdateModelVersionResponse path := "/api/2.0/mlflow/model-versions/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateModelVersionResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateModelVersionResponse) return err } func (a *modelRegistryImpl) UpdatePermissions(ctx 
context.Context, request RegisteredModelPermissionsRequest) (*RegisteredModelPermissions, error) { var registeredModelPermissions RegisteredModelPermissions path := fmt.Sprintf("/api/2.0/permissions/registered-models/%v", request.RegisteredModelId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, ®isteredModelPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, ®isteredModelPermissions) return ®isteredModelPermissions, err } func (a *modelRegistryImpl) UpdateWebhook(ctx context.Context, request UpdateRegistryWebhook) error { var updateWebhookResponse UpdateWebhookResponse path := "/api/2.0/mlflow/registry-webhooks/update" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateWebhookResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateWebhookResponse) return err } diff --git a/service/oauth2/impl.go b/service/oauth2/impl.go index 643052a3b..484596f10 100755 --- a/service/oauth2/impl.go +++ b/service/oauth2/impl.go @@ -18,47 +18,54 @@ type accountFederationPolicyImpl struct { func (a *accountFederationPolicyImpl) Create(ctx context.Context, request CreateAccountFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + queryParams["policy_id"] = request.PolicyId headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.Policy, &federationPolicy) 
+ err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Policy, &federationPolicy) return &federationPolicy, err } func (a *accountFederationPolicyImpl) Delete(ctx context.Context, request DeleteAccountFederationPolicyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountFederationPolicyImpl) Get(ctx context.Context, request GetAccountFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &federationPolicy) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &federationPolicy) return &federationPolicy, err } func (a *accountFederationPolicyImpl) List(ctx context.Context, request ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { var listFederationPoliciesResponse ListFederationPoliciesResponse path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listFederationPoliciesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFederationPoliciesResponse) return 
&listFederationPoliciesResponse, err } func (a *accountFederationPolicyImpl) Update(ctx context.Context, request UpdateAccountFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + queryParams["update_mask"] = request.UpdateMask headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request.Policy, &federationPolicy) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Policy, &federationPolicy) return &federationPolicy, err } @@ -70,47 +77,52 @@ type customAppIntegrationImpl struct { func (a *customAppIntegrationImpl) Create(ctx context.Context, request CreateCustomAppIntegration) (*CreateCustomAppIntegrationOutput, error) { var createCustomAppIntegrationOutput CreateCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/custom-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createCustomAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createCustomAppIntegrationOutput) return &createCustomAppIntegrationOutput, err } func (a *customAppIntegrationImpl) Delete(ctx context.Context, request DeleteCustomAppIntegrationRequest) error { var deleteCustomAppIntegrationOutput DeleteCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteCustomAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCustomAppIntegrationOutput) return err } func (a *customAppIntegrationImpl) Get(ctx context.Context, request GetCustomAppIntegrationRequest) (*GetCustomAppIntegrationOutput, error) { var getCustomAppIntegrationOutput GetCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getCustomAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCustomAppIntegrationOutput) return &getCustomAppIntegrationOutput, err } func (a *customAppIntegrationImpl) List(ctx context.Context, request ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { var getCustomAppIntegrationsOutput GetCustomAppIntegrationsOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/custom-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getCustomAppIntegrationsOutput) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCustomAppIntegrationsOutput) return &getCustomAppIntegrationsOutput, err } func (a *customAppIntegrationImpl) Update(ctx context.Context, request UpdateCustomAppIntegration) error { var updateCustomAppIntegrationOutput UpdateCustomAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/custom-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) 
headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateCustomAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateCustomAppIntegrationOutput) return err } @@ -122,9 +134,10 @@ type oAuthPublishedAppsImpl struct { func (a *oAuthPublishedAppsImpl) List(ctx context.Context, request ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { var getPublishedAppsOutput GetPublishedAppsOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-apps", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPublishedAppsOutput) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedAppsOutput) return &getPublishedAppsOutput, err } @@ -136,47 +149,52 @@ type publishedAppIntegrationImpl struct { func (a *publishedAppIntegrationImpl) Create(ctx context.Context, request CreatePublishedAppIntegration) (*CreatePublishedAppIntegrationOutput, error) { var createPublishedAppIntegrationOutput CreatePublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createPublishedAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPublishedAppIntegrationOutput) return &createPublishedAppIntegrationOutput, err } func (a *publishedAppIntegrationImpl) Delete(ctx context.Context, request DeletePublishedAppIntegrationRequest) error { var 
deletePublishedAppIntegrationOutput DeletePublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deletePublishedAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deletePublishedAppIntegrationOutput) return err } func (a *publishedAppIntegrationImpl) Get(ctx context.Context, request GetPublishedAppIntegrationRequest) (*GetPublishedAppIntegrationOutput, error) { var getPublishedAppIntegrationOutput GetPublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPublishedAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedAppIntegrationOutput) return &getPublishedAppIntegrationOutput, err } func (a *publishedAppIntegrationImpl) List(ctx context.Context, request ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { var getPublishedAppIntegrationsOutput GetPublishedAppIntegrationsOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-app-integrations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPublishedAppIntegrationsOutput) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedAppIntegrationsOutput) return 
&getPublishedAppIntegrationsOutput, err } func (a *publishedAppIntegrationImpl) Update(ctx context.Context, request UpdatePublishedAppIntegration) error { var updatePublishedAppIntegrationOutput UpdatePublishedAppIntegrationOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-app-integrations/%v", a.client.ConfiguredAccountID(), request.IntegrationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updatePublishedAppIntegrationOutput) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updatePublishedAppIntegrationOutput) return err } @@ -188,47 +206,54 @@ type servicePrincipalFederationPolicyImpl struct { func (a *servicePrincipalFederationPolicyImpl) Create(ctx context.Context, request CreateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) + queryParams["policy_id"] = request.PolicyId headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request.Policy, &federationPolicy) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.Policy, &federationPolicy) return &federationPolicy, err } func (a *servicePrincipalFederationPolicyImpl) Delete(ctx context.Context, request DeleteServicePrincipalFederationPolicyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) + queryParams := make(map[string]any) headers := 
make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *servicePrincipalFederationPolicyImpl) Get(ctx context.Context, request GetServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &federationPolicy) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &federationPolicy) return &federationPolicy, err } func (a *servicePrincipalFederationPolicyImpl) List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { var listFederationPoliciesResponse ListFederationPoliciesResponse path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listFederationPoliciesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listFederationPoliciesResponse) return &listFederationPoliciesResponse, err } func (a *servicePrincipalFederationPolicyImpl) Update(ctx context.Context, request UpdateServicePrincipalFederationPolicyRequest) (*FederationPolicy, error) { var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies/%v", 
a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) + queryParams := make(map[string]any) + queryParams["update_mask"] = request.UpdateMask headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request.Policy, &federationPolicy) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Policy, &federationPolicy) return &federationPolicy, err } @@ -240,25 +265,28 @@ type servicePrincipalSecretsImpl struct { func (a *servicePrincipalSecretsImpl) Create(ctx context.Context, request CreateServicePrincipalSecretRequest) (*CreateServicePrincipalSecretResponse, error) { var createServicePrincipalSecretResponse CreateServicePrincipalSecretResponse path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/credentials/secrets", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &createServicePrincipalSecretResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &createServicePrincipalSecretResponse) return &createServicePrincipalSecretResponse, err } func (a *servicePrincipalSecretsImpl) Delete(ctx context.Context, request DeleteServicePrincipalSecretRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/credentials/secrets/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.SecretId) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *servicePrincipalSecretsImpl) List(ctx context.Context, 
request ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { var listServicePrincipalSecretsResponse ListServicePrincipalSecretsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/credentials/secrets", a.client.ConfiguredAccountID(), request.ServicePrincipalId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listServicePrincipalSecretsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listServicePrincipalSecretsResponse) return &listServicePrincipalSecretsResponse, err } diff --git a/service/pipelines/impl.go b/service/pipelines/impl.go index 45c169621..d4ad82054 100755 --- a/service/pipelines/impl.go +++ b/service/pipelines/impl.go @@ -18,130 +18,144 @@ type pipelinesImpl struct { func (a *pipelinesImpl) Create(ctx context.Context, request CreatePipeline) (*CreatePipelineResponse, error) { var createPipelineResponse CreatePipelineResponse path := "/api/2.0/pipelines" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createPipelineResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createPipelineResponse) return &createPipelineResponse, err } func (a *pipelinesImpl) Delete(ctx context.Context, request DeletePipelineRequest) error { var deletePipelineResponse DeletePipelineResponse path := fmt.Sprintf("/api/2.0/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deletePipelineResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, 
&deletePipelineResponse) return err } func (a *pipelinesImpl) Get(ctx context.Context, request GetPipelineRequest) (*GetPipelineResponse, error) { var getPipelineResponse GetPipelineResponse path := fmt.Sprintf("/api/2.0/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPipelineResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPipelineResponse) return &getPipelineResponse, err } func (a *pipelinesImpl) GetPermissionLevels(ctx context.Context, request GetPipelinePermissionLevelsRequest) (*GetPipelinePermissionLevelsResponse, error) { var getPipelinePermissionLevelsResponse GetPipelinePermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/pipelines/%v/permissionLevels", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getPipelinePermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPipelinePermissionLevelsResponse) return &getPipelinePermissionLevelsResponse, err } func (a *pipelinesImpl) GetPermissions(ctx context.Context, request GetPipelinePermissionsRequest) (*PipelinePermissions, error) { var pipelinePermissions PipelinePermissions path := fmt.Sprintf("/api/2.0/permissions/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &pipelinePermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &pipelinePermissions) return &pipelinePermissions, err } func (a *pipelinesImpl) GetUpdate(ctx context.Context, request GetUpdateRequest) (*GetUpdateResponse, error) 
{ var getUpdateResponse GetUpdateResponse path := fmt.Sprintf("/api/2.0/pipelines/%v/updates/%v", request.PipelineId, request.UpdateId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getUpdateResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getUpdateResponse) return &getUpdateResponse, err } func (a *pipelinesImpl) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { var listPipelineEventsResponse ListPipelineEventsResponse path := fmt.Sprintf("/api/2.0/pipelines/%v/events", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listPipelineEventsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listPipelineEventsResponse) return &listPipelineEventsResponse, err } func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { var listPipelinesResponse ListPipelinesResponse path := "/api/2.0/pipelines" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listPipelinesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listPipelinesResponse) return &listPipelinesResponse, err } func (a *pipelinesImpl) ListUpdates(ctx context.Context, request ListUpdatesRequest) (*ListUpdatesResponse, error) { var listUpdatesResponse ListUpdatesResponse path := fmt.Sprintf("/api/2.0/pipelines/%v/updates", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := 
a.client.Do(ctx, http.MethodGet, path, headers, request, &listUpdatesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listUpdatesResponse) return &listUpdatesResponse, err } func (a *pipelinesImpl) SetPermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { var pipelinePermissions PipelinePermissions path := fmt.Sprintf("/api/2.0/permissions/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &pipelinePermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &pipelinePermissions) return &pipelinePermissions, err } func (a *pipelinesImpl) StartUpdate(ctx context.Context, request StartUpdate) (*StartUpdateResponse, error) { var startUpdateResponse StartUpdateResponse path := fmt.Sprintf("/api/2.0/pipelines/%v/updates", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &startUpdateResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &startUpdateResponse) return &startUpdateResponse, err } func (a *pipelinesImpl) Stop(ctx context.Context, request StopRequest) error { var stopPipelineResponse StopPipelineResponse path := fmt.Sprintf("/api/2.0/pipelines/%v/stop", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &stopPipelineResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &stopPipelineResponse) return err } func (a *pipelinesImpl) Update(ctx 
context.Context, request EditPipeline) error { var editPipelineResponse EditPipelineResponse path := fmt.Sprintf("/api/2.0/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &editPipelineResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &editPipelineResponse) return err } func (a *pipelinesImpl) UpdatePermissions(ctx context.Context, request PipelinePermissionsRequest) (*PipelinePermissions, error) { var pipelinePermissions PipelinePermissions path := fmt.Sprintf("/api/2.0/permissions/pipelines/%v", request.PipelineId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &pipelinePermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &pipelinePermissions) return &pipelinePermissions, err } diff --git a/service/pkg.go b/service/pkg.go index 0e5a53e7e..bd49b7d6b 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -48,10 +48,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. -// // - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. // +// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. 
// // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. diff --git a/service/provisioning/impl.go b/service/provisioning/impl.go index 6b389f472..00fe68611 100755 --- a/service/provisioning/impl.go +++ b/service/provisioning/impl.go @@ -18,37 +18,41 @@ type credentialsImpl struct { func (a *credentialsImpl) Create(ctx context.Context, request CreateCredentialRequest) (*Credential, error) { var credential Credential path := fmt.Sprintf("/api/2.0/accounts/%v/credentials", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &credential) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &credential) return &credential, err } func (a *credentialsImpl) Delete(ctx context.Context, request DeleteCredentialRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/credentials/%v", a.client.ConfiguredAccountID(), request.CredentialsId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *credentialsImpl) Get(ctx context.Context, request GetCredentialRequest) (*Credential, error) { var credential Credential path := fmt.Sprintf("/api/2.0/accounts/%v/credentials/%v", a.client.ConfiguredAccountID(), request.CredentialsId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &credential) + err := a.client.Do(ctx, http.MethodGet, path, 
headers, queryParams, request, &credential) return &credential, err } func (a *credentialsImpl) List(ctx context.Context) ([]Credential, error) { var credentialList []Credential path := fmt.Sprintf("/api/2.0/accounts/%v/credentials", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &credentialList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &credentialList) return credentialList, err } @@ -60,37 +64,41 @@ type encryptionKeysImpl struct { func (a *encryptionKeysImpl) Create(ctx context.Context, request CreateCustomerManagedKeyRequest) (*CustomerManagedKey, error) { var customerManagedKey CustomerManagedKey path := fmt.Sprintf("/api/2.0/accounts/%v/customer-managed-keys", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &customerManagedKey) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &customerManagedKey) return &customerManagedKey, err } func (a *encryptionKeysImpl) Delete(ctx context.Context, request DeleteEncryptionKeyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/customer-managed-keys/%v", a.client.ConfiguredAccountID(), request.CustomerManagedKeyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *encryptionKeysImpl) Get(ctx context.Context, request GetEncryptionKeyRequest) (*CustomerManagedKey, error) { var customerManagedKey CustomerManagedKey path := 
fmt.Sprintf("/api/2.0/accounts/%v/customer-managed-keys/%v", a.client.ConfiguredAccountID(), request.CustomerManagedKeyId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &customerManagedKey) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &customerManagedKey) return &customerManagedKey, err } func (a *encryptionKeysImpl) List(ctx context.Context) ([]CustomerManagedKey, error) { var customerManagedKeyList []CustomerManagedKey path := fmt.Sprintf("/api/2.0/accounts/%v/customer-managed-keys", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &customerManagedKeyList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &customerManagedKeyList) return customerManagedKeyList, err } @@ -102,37 +110,41 @@ type networksImpl struct { func (a *networksImpl) Create(ctx context.Context, request CreateNetworkRequest) (*Network, error) { var network Network path := fmt.Sprintf("/api/2.0/accounts/%v/networks", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &network) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &network) return &network, err } func (a *networksImpl) Delete(ctx context.Context, request DeleteNetworkRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/networks/%v", a.client.ConfiguredAccountID(), request.NetworkId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, 
&deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *networksImpl) Get(ctx context.Context, request GetNetworkRequest) (*Network, error) { var network Network path := fmt.Sprintf("/api/2.0/accounts/%v/networks/%v", a.client.ConfiguredAccountID(), request.NetworkId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &network) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &network) return &network, err } func (a *networksImpl) List(ctx context.Context) ([]Network, error) { var networkList []Network path := fmt.Sprintf("/api/2.0/accounts/%v/networks", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &networkList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &networkList) return networkList, err } @@ -144,47 +156,52 @@ type privateAccessImpl struct { func (a *privateAccessImpl) Create(ctx context.Context, request UpsertPrivateAccessSettingsRequest) (*PrivateAccessSettings, error) { var privateAccessSettings PrivateAccessSettings path := fmt.Sprintf("/api/2.0/accounts/%v/private-access-settings", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &privateAccessSettings) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &privateAccessSettings) return &privateAccessSettings, err } func (a *privateAccessImpl) Delete(ctx context.Context, request DeletePrivateAccesRequest) error { var deleteResponse DeleteResponse path := 
fmt.Sprintf("/api/2.0/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *privateAccessImpl) Get(ctx context.Context, request GetPrivateAccesRequest) (*PrivateAccessSettings, error) { var privateAccessSettings PrivateAccessSettings path := fmt.Sprintf("/api/2.0/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &privateAccessSettings) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &privateAccessSettings) return &privateAccessSettings, err } func (a *privateAccessImpl) List(ctx context.Context) ([]PrivateAccessSettings, error) { var privateAccessSettingsList []PrivateAccessSettings path := fmt.Sprintf("/api/2.0/accounts/%v/private-access-settings", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &privateAccessSettingsList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &privateAccessSettingsList) return privateAccessSettingsList, err } func (a *privateAccessImpl) Replace(ctx context.Context, request UpsertPrivateAccessSettingsRequest) error { var replaceResponse ReplaceResponse path := fmt.Sprintf("/api/2.0/accounts/%v/private-access-settings/%v", a.client.ConfiguredAccountID(), request.PrivateAccessSettingsId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] 
= "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &replaceResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) return err } @@ -196,37 +213,41 @@ type storageImpl struct { func (a *storageImpl) Create(ctx context.Context, request CreateStorageConfigurationRequest) (*StorageConfiguration, error) { var storageConfiguration StorageConfiguration path := fmt.Sprintf("/api/2.0/accounts/%v/storage-configurations", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &storageConfiguration) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &storageConfiguration) return &storageConfiguration, err } func (a *storageImpl) Delete(ctx context.Context, request DeleteStorageRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/storage-configurations/%v", a.client.ConfiguredAccountID(), request.StorageConfigurationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *storageImpl) Get(ctx context.Context, request GetStorageRequest) (*StorageConfiguration, error) { var storageConfiguration StorageConfiguration path := fmt.Sprintf("/api/2.0/accounts/%v/storage-configurations/%v", a.client.ConfiguredAccountID(), request.StorageConfigurationId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, 
&storageConfiguration) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &storageConfiguration) return &storageConfiguration, err } func (a *storageImpl) List(ctx context.Context) ([]StorageConfiguration, error) { var storageConfigurationList []StorageConfiguration path := fmt.Sprintf("/api/2.0/accounts/%v/storage-configurations", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &storageConfigurationList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &storageConfigurationList) return storageConfigurationList, err } @@ -238,37 +259,41 @@ type vpcEndpointsImpl struct { func (a *vpcEndpointsImpl) Create(ctx context.Context, request CreateVpcEndpointRequest) (*VpcEndpoint, error) { var vpcEndpoint VpcEndpoint path := fmt.Sprintf("/api/2.0/accounts/%v/vpc-endpoints", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &vpcEndpoint) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &vpcEndpoint) return &vpcEndpoint, err } func (a *vpcEndpointsImpl) Delete(ctx context.Context, request DeleteVpcEndpointRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/vpc-endpoints/%v", a.client.ConfiguredAccountID(), request.VpcEndpointId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *vpcEndpointsImpl) Get(ctx context.Context, request GetVpcEndpointRequest) (*VpcEndpoint, error) { 
var vpcEndpoint VpcEndpoint path := fmt.Sprintf("/api/2.0/accounts/%v/vpc-endpoints/%v", a.client.ConfiguredAccountID(), request.VpcEndpointId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &vpcEndpoint) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &vpcEndpoint) return &vpcEndpoint, err } func (a *vpcEndpointsImpl) List(ctx context.Context) ([]VpcEndpoint, error) { var vpcEndpointList []VpcEndpoint path := fmt.Sprintf("/api/2.0/accounts/%v/vpc-endpoints", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &vpcEndpointList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &vpcEndpointList) return vpcEndpointList, err } @@ -280,46 +305,51 @@ type workspacesImpl struct { func (a *workspacesImpl) Create(ctx context.Context, request CreateWorkspaceRequest) (*Workspace, error) { var workspace Workspace path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &workspace) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &workspace) return &workspace, err } func (a *workspacesImpl) Delete(ctx context.Context, request DeleteWorkspaceRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := 
a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *workspacesImpl) Get(ctx context.Context, request GetWorkspaceRequest) (*Workspace, error) { var workspace Workspace path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &workspace) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspace) return &workspace, err } func (a *workspacesImpl) List(ctx context.Context) ([]Workspace, error) { var workspaceList []Workspace path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &workspaceList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &workspaceList) return workspaceList, err } func (a *workspacesImpl) Update(ctx context.Context, request UpdateWorkspaceRequest) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v", a.client.ConfiguredAccountID(), request.WorkspaceId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err } diff --git a/service/serving/impl.go b/service/serving/impl.go index 77476ef7f..d31d545f8 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -21,161 +21,178 @@ type servingEndpointsImpl struct { func (a *servingEndpointsImpl) BuildLogs(ctx context.Context, request BuildLogsRequest) 
(*BuildLogsResponse, error) { var buildLogsResponse BuildLogsResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/served-models/%v/build-logs", request.Name, request.ServedModelName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &buildLogsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &buildLogsResponse) return &buildLogsResponse, err } func (a *servingEndpointsImpl) Create(ctx context.Context, request CreateServingEndpoint) (*ServingEndpointDetailed, error) { var servingEndpointDetailed ServingEndpointDetailed path := "/api/2.0/serving-endpoints" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &servingEndpointDetailed) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &servingEndpointDetailed) return &servingEndpointDetailed, err } func (a *servingEndpointsImpl) Delete(ctx context.Context, request DeleteServingEndpointRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *servingEndpointsImpl) ExportMetrics(ctx context.Context, request ExportMetricsRequest) (*ExportMetricsResponse, error) { var exportMetricsResponse ExportMetricsResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/metrics", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"text/plain" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &exportMetricsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &exportMetricsResponse) return &exportMetricsResponse, err } func (a *servingEndpointsImpl) Get(ctx context.Context, request GetServingEndpointRequest) (*ServingEndpointDetailed, error) { var servingEndpointDetailed ServingEndpointDetailed path := fmt.Sprintf("/api/2.0/serving-endpoints/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &servingEndpointDetailed) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servingEndpointDetailed) return &servingEndpointDetailed, err } func (a *servingEndpointsImpl) GetOpenApi(ctx context.Context, request GetOpenApiRequest) error { var getOpenApiResponse GetOpenApiResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/openapi", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getOpenApiResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getOpenApiResponse) return err } func (a *servingEndpointsImpl) GetPermissionLevels(ctx context.Context, request GetServingEndpointPermissionLevelsRequest) (*GetServingEndpointPermissionLevelsResponse, error) { var getServingEndpointPermissionLevelsResponse GetServingEndpointPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/serving-endpoints/%v/permissionLevels", request.ServingEndpointId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getServingEndpointPermissionLevelsResponse) + err := a.client.Do(ctx, 
http.MethodGet, path, headers, queryParams, request, &getServingEndpointPermissionLevelsResponse) return &getServingEndpointPermissionLevelsResponse, err } func (a *servingEndpointsImpl) GetPermissions(ctx context.Context, request GetServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { var servingEndpointPermissions ServingEndpointPermissions path := fmt.Sprintf("/api/2.0/permissions/serving-endpoints/%v", request.ServingEndpointId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &servingEndpointPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &servingEndpointPermissions) return &servingEndpointPermissions, err } func (a *servingEndpointsImpl) List(ctx context.Context) (*ListEndpointsResponse, error) { var listEndpointsResponse ListEndpointsResponse path := "/api/2.0/serving-endpoints" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listEndpointsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listEndpointsResponse) return &listEndpointsResponse, err } func (a *servingEndpointsImpl) Logs(ctx context.Context, request LogsRequest) (*ServerLogsResponse, error) { var serverLogsResponse ServerLogsResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/served-models/%v/logs", request.Name, request.ServedModelName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &serverLogsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &serverLogsResponse) return &serverLogsResponse, err } func (a *servingEndpointsImpl) Patch(ctx context.Context, request PatchServingEndpointTags) ([]EndpointTag, error) { var 
endpointTagList []EndpointTag path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/tags", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &endpointTagList) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &endpointTagList) return endpointTagList, err } func (a *servingEndpointsImpl) Put(ctx context.Context, request PutRequest) (*PutResponse, error) { var putResponse PutResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/rate-limits", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &putResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &putResponse) return &putResponse, err } func (a *servingEndpointsImpl) PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) { var putAiGatewayResponse PutAiGatewayResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/ai-gateway", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &putAiGatewayResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &putAiGatewayResponse) return &putAiGatewayResponse, err } func (a *servingEndpointsImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { var queryEndpointResponse QueryEndpointResponse path := fmt.Sprintf("/serving-endpoints/%v/invocations", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) 
headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &queryEndpointResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryEndpointResponse) return &queryEndpointResponse, err } func (a *servingEndpointsImpl) SetPermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { var servingEndpointPermissions ServingEndpointPermissions path := fmt.Sprintf("/api/2.0/permissions/serving-endpoints/%v", request.ServingEndpointId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &servingEndpointPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &servingEndpointPermissions) return &servingEndpointPermissions, err } func (a *servingEndpointsImpl) UpdateConfig(ctx context.Context, request EndpointCoreConfigInput) (*ServingEndpointDetailed, error) { var servingEndpointDetailed ServingEndpointDetailed path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/config", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &servingEndpointDetailed) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &servingEndpointDetailed) return &servingEndpointDetailed, err } func (a *servingEndpointsImpl) UpdatePermissions(ctx context.Context, request ServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) { var servingEndpointPermissions ServingEndpointPermissions path := fmt.Sprintf("/api/2.0/permissions/serving-endpoints/%v", request.ServingEndpointId) + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &servingEndpointPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &servingEndpointPermissions) return &servingEndpointPermissions, err } @@ -219,6 +236,9 @@ func (a *servingEndpointsDataPlaneImpl) Query(ctx context.Context, request Query headers["Content-Type"] = "application/json" opts := []httpclient.DoOption{} opts = append(opts, httpclient.WithRequestHeaders(headers)) + queryParams := make(map[string]any) + opts = append(opts, httpclient.WithQueryParameters(queryParams)) + var queryEndpointResponse QueryEndpointResponse opts = append(opts, httpclient.WithRequestData(request)) opts = append(opts, httpclient.WithResponseUnmarshal(&queryEndpointResponse)) diff --git a/service/settings/impl.go b/service/settings/impl.go index 3681bffc2..ea0048fc6 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -18,57 +18,63 @@ type accountIpAccessListsImpl struct { func (a *accountIpAccessListsImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { var createIpAccessListResponse CreateIpAccessListResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createIpAccessListResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createIpAccessListResponse) return &createIpAccessListResponse, err } func (a *accountIpAccessListsImpl) Delete(ctx context.Context, request DeleteAccountIpAccessListRequest) error { var deleteResponse DeleteResponse path := 
fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *accountIpAccessListsImpl) Get(ctx context.Context, request GetAccountIpAccessListRequest) (*GetIpAccessListResponse, error) { var getIpAccessListResponse GetIpAccessListResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getIpAccessListResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getIpAccessListResponse) return &getIpAccessListResponse, err } func (a *accountIpAccessListsImpl) List(ctx context.Context) (*GetIpAccessListsResponse, error) { var getIpAccessListsResponse GetIpAccessListsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists", a.client.ConfiguredAccountID()) + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &getIpAccessListsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getIpAccessListsResponse) return &getIpAccessListsResponse, err } func (a *accountIpAccessListsImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { var replaceResponse ReplaceResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" 
headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &replaceResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) return err } func (a *accountIpAccessListsImpl) Update(ctx context.Context, request UpdateIpAccessList) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err } @@ -85,28 +91,31 @@ type aibiDashboardEmbeddingAccessPolicyImpl struct { func (a *aibiDashboardEmbeddingAccessPolicyImpl) Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) (*DeleteAibiDashboardEmbeddingAccessPolicySettingResponse, error) { var deleteAibiDashboardEmbeddingAccessPolicySettingResponse DeleteAibiDashboardEmbeddingAccessPolicySettingResponse path := "/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteAibiDashboardEmbeddingAccessPolicySettingResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAibiDashboardEmbeddingAccessPolicySettingResponse) return &deleteAibiDashboardEmbeddingAccessPolicySettingResponse, err } func (a *aibiDashboardEmbeddingAccessPolicyImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { var 
aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting path := "/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &aibiDashboardEmbeddingAccessPolicySetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &aibiDashboardEmbeddingAccessPolicySetting) return &aibiDashboardEmbeddingAccessPolicySetting, err } func (a *aibiDashboardEmbeddingAccessPolicyImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingAccessPolicySettingRequest) (*AibiDashboardEmbeddingAccessPolicySetting, error) { var aibiDashboardEmbeddingAccessPolicySetting AibiDashboardEmbeddingAccessPolicySetting path := "/api/2.0/settings/types/aibi_dash_embed_ws_acc_policy/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &aibiDashboardEmbeddingAccessPolicySetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &aibiDashboardEmbeddingAccessPolicySetting) return &aibiDashboardEmbeddingAccessPolicySetting, err } @@ -118,28 +127,31 @@ type aibiDashboardEmbeddingApprovedDomainsImpl struct { func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Delete(ctx context.Context, request DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, error) { var deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse DeleteAibiDashboardEmbeddingApprovedDomainsSettingResponse path := "/api/2.0/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := 
a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse) return &deleteAibiDashboardEmbeddingApprovedDomainsSettingResponse, err } func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Get(ctx context.Context, request GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting path := "/api/2.0/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &aibiDashboardEmbeddingApprovedDomainsSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &aibiDashboardEmbeddingApprovedDomainsSetting) return &aibiDashboardEmbeddingApprovedDomainsSetting, err } func (a *aibiDashboardEmbeddingApprovedDomainsImpl) Update(ctx context.Context, request UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest) (*AibiDashboardEmbeddingApprovedDomainsSetting, error) { var aibiDashboardEmbeddingApprovedDomainsSetting AibiDashboardEmbeddingApprovedDomainsSetting path := "/api/2.0/settings/types/aibi_dash_embed_ws_apprvd_domains/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &aibiDashboardEmbeddingApprovedDomainsSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &aibiDashboardEmbeddingApprovedDomainsSetting) return &aibiDashboardEmbeddingApprovedDomainsSetting, err } @@ -151,19 +163,21 @@ type 
automaticClusterUpdateImpl struct { func (a *automaticClusterUpdateImpl) Get(ctx context.Context, request GetAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { var automaticClusterUpdateSetting AutomaticClusterUpdateSetting path := "/api/2.0/settings/types/automatic_cluster_update/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &automaticClusterUpdateSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &automaticClusterUpdateSetting) return &automaticClusterUpdateSetting, err } func (a *automaticClusterUpdateImpl) Update(ctx context.Context, request UpdateAutomaticClusterUpdateSettingRequest) (*AutomaticClusterUpdateSetting, error) { var automaticClusterUpdateSetting AutomaticClusterUpdateSetting path := "/api/2.0/settings/types/automatic_cluster_update/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &automaticClusterUpdateSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &automaticClusterUpdateSetting) return &automaticClusterUpdateSetting, err } @@ -175,19 +189,21 @@ type complianceSecurityProfileImpl struct { func (a *complianceSecurityProfileImpl) Get(ctx context.Context, request GetComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) { var complianceSecurityProfileSetting ComplianceSecurityProfileSetting path := "/api/2.0/settings/types/shield_csp_enablement_ws_db/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &complianceSecurityProfileSetting) + err := 
a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &complianceSecurityProfileSetting) return &complianceSecurityProfileSetting, err } func (a *complianceSecurityProfileImpl) Update(ctx context.Context, request UpdateComplianceSecurityProfileSettingRequest) (*ComplianceSecurityProfileSetting, error) { var complianceSecurityProfileSetting ComplianceSecurityProfileSetting path := "/api/2.0/settings/types/shield_csp_enablement_ws_db/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &complianceSecurityProfileSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &complianceSecurityProfileSetting) return &complianceSecurityProfileSetting, err } @@ -199,10 +215,11 @@ type credentialsManagerImpl struct { func (a *credentialsManagerImpl) ExchangeToken(ctx context.Context, request ExchangeTokenRequest) (*ExchangeTokenResponse, error) { var exchangeTokenResponse ExchangeTokenResponse path := "/api/2.0/credentials-manager/exchange-tokens/token" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &exchangeTokenResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &exchangeTokenResponse) return &exchangeTokenResponse, err } @@ -214,19 +231,21 @@ type cspEnablementAccountImpl struct { func (a *cspEnablementAccountImpl) Get(ctx context.Context, request GetCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { var cspEnablementAccountSetting CspEnablementAccountSetting path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/shield_csp_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &cspEnablementAccountSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &cspEnablementAccountSetting) return &cspEnablementAccountSetting, err } func (a *cspEnablementAccountImpl) Update(ctx context.Context, request UpdateCspEnablementAccountSettingRequest) (*CspEnablementAccountSetting, error) { var cspEnablementAccountSetting CspEnablementAccountSetting path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/shield_csp_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &cspEnablementAccountSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &cspEnablementAccountSetting) return &cspEnablementAccountSetting, err } @@ -238,28 +257,31 @@ type defaultNamespaceImpl struct { func (a *defaultNamespaceImpl) Delete(ctx context.Context, request DeleteDefaultNamespaceSettingRequest) (*DeleteDefaultNamespaceSettingResponse, error) { var deleteDefaultNamespaceSettingResponse DeleteDefaultNamespaceSettingResponse path := "/api/2.0/settings/types/default_namespace_ws/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteDefaultNamespaceSettingResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDefaultNamespaceSettingResponse) return &deleteDefaultNamespaceSettingResponse, err } func (a *defaultNamespaceImpl) Get(ctx context.Context, request GetDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { var 
defaultNamespaceSetting DefaultNamespaceSetting path := "/api/2.0/settings/types/default_namespace_ws/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &defaultNamespaceSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &defaultNamespaceSetting) return &defaultNamespaceSetting, err } func (a *defaultNamespaceImpl) Update(ctx context.Context, request UpdateDefaultNamespaceSettingRequest) (*DefaultNamespaceSetting, error) { var defaultNamespaceSetting DefaultNamespaceSetting path := "/api/2.0/settings/types/default_namespace_ws/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &defaultNamespaceSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &defaultNamespaceSetting) return &defaultNamespaceSetting, err } @@ -271,28 +293,31 @@ type disableLegacyAccessImpl struct { func (a *disableLegacyAccessImpl) Delete(ctx context.Context, request DeleteDisableLegacyAccessRequest) (*DeleteDisableLegacyAccessResponse, error) { var deleteDisableLegacyAccessResponse DeleteDisableLegacyAccessResponse path := "/api/2.0/settings/types/disable_legacy_access/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteDisableLegacyAccessResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDisableLegacyAccessResponse) return &deleteDisableLegacyAccessResponse, err } func (a *disableLegacyAccessImpl) Get(ctx context.Context, request GetDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { var 
disableLegacyAccess DisableLegacyAccess path := "/api/2.0/settings/types/disable_legacy_access/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &disableLegacyAccess) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &disableLegacyAccess) return &disableLegacyAccess, err } func (a *disableLegacyAccessImpl) Update(ctx context.Context, request UpdateDisableLegacyAccessRequest) (*DisableLegacyAccess, error) { var disableLegacyAccess DisableLegacyAccess path := "/api/2.0/settings/types/disable_legacy_access/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &disableLegacyAccess) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &disableLegacyAccess) return &disableLegacyAccess, err } @@ -304,28 +329,31 @@ type disableLegacyDbfsImpl struct { func (a *disableLegacyDbfsImpl) Delete(ctx context.Context, request DeleteDisableLegacyDbfsRequest) (*DeleteDisableLegacyDbfsResponse, error) { var deleteDisableLegacyDbfsResponse DeleteDisableLegacyDbfsResponse path := "/api/2.0/settings/types/disable_legacy_dbfs/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteDisableLegacyDbfsResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDisableLegacyDbfsResponse) return &deleteDisableLegacyDbfsResponse, err } func (a *disableLegacyDbfsImpl) Get(ctx context.Context, request GetDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { var disableLegacyDbfs DisableLegacyDbfs path := 
"/api/2.0/settings/types/disable_legacy_dbfs/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &disableLegacyDbfs) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &disableLegacyDbfs) return &disableLegacyDbfs, err } func (a *disableLegacyDbfsImpl) Update(ctx context.Context, request UpdateDisableLegacyDbfsRequest) (*DisableLegacyDbfs, error) { var disableLegacyDbfs DisableLegacyDbfs path := "/api/2.0/settings/types/disable_legacy_dbfs/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &disableLegacyDbfs) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &disableLegacyDbfs) return &disableLegacyDbfs, err } @@ -337,28 +365,31 @@ type disableLegacyFeaturesImpl struct { func (a *disableLegacyFeaturesImpl) Delete(ctx context.Context, request DeleteDisableLegacyFeaturesRequest) (*DeleteDisableLegacyFeaturesResponse, error) { var deleteDisableLegacyFeaturesResponse DeleteDisableLegacyFeaturesResponse path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteDisableLegacyFeaturesResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDisableLegacyFeaturesResponse) return &deleteDisableLegacyFeaturesResponse, err } func (a *disableLegacyFeaturesImpl) Get(ctx context.Context, request GetDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { var disableLegacyFeatures 
DisableLegacyFeatures path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &disableLegacyFeatures) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &disableLegacyFeatures) return &disableLegacyFeatures, err } func (a *disableLegacyFeaturesImpl) Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) { var disableLegacyFeatures DisableLegacyFeatures path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/disable_legacy_features/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &disableLegacyFeatures) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &disableLegacyFeatures) return &disableLegacyFeatures, err } @@ -370,19 +401,21 @@ type enhancedSecurityMonitoringImpl struct { func (a *enhancedSecurityMonitoringImpl) Get(ctx context.Context, request GetEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { var enhancedSecurityMonitoringSetting EnhancedSecurityMonitoringSetting path := "/api/2.0/settings/types/shield_esm_enablement_ws_db/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &enhancedSecurityMonitoringSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &enhancedSecurityMonitoringSetting) return &enhancedSecurityMonitoringSetting, err } func (a 
*enhancedSecurityMonitoringImpl) Update(ctx context.Context, request UpdateEnhancedSecurityMonitoringSettingRequest) (*EnhancedSecurityMonitoringSetting, error) { var enhancedSecurityMonitoringSetting EnhancedSecurityMonitoringSetting path := "/api/2.0/settings/types/shield_esm_enablement_ws_db/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &enhancedSecurityMonitoringSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &enhancedSecurityMonitoringSetting) return &enhancedSecurityMonitoringSetting, err } @@ -394,19 +427,21 @@ type esmEnablementAccountImpl struct { func (a *esmEnablementAccountImpl) Get(ctx context.Context, request GetEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { var esmEnablementAccountSetting EsmEnablementAccountSetting path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/shield_esm_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &esmEnablementAccountSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &esmEnablementAccountSetting) return &esmEnablementAccountSetting, err } func (a *esmEnablementAccountImpl) Update(ctx context.Context, request UpdateEsmEnablementAccountSettingRequest) (*EsmEnablementAccountSetting, error) { var esmEnablementAccountSetting EsmEnablementAccountSetting path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/shield_esm_enablement_ac/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - 
err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &esmEnablementAccountSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &esmEnablementAccountSetting) return &esmEnablementAccountSetting, err } @@ -418,57 +453,63 @@ type ipAccessListsImpl struct { func (a *ipAccessListsImpl) Create(ctx context.Context, request CreateIpAccessList) (*CreateIpAccessListResponse, error) { var createIpAccessListResponse CreateIpAccessListResponse path := "/api/2.0/ip-access-lists" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createIpAccessListResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createIpAccessListResponse) return &createIpAccessListResponse, err } func (a *ipAccessListsImpl) Delete(ctx context.Context, request DeleteIpAccessListRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *ipAccessListsImpl) Get(ctx context.Context, request GetIpAccessListRequest) (*FetchIpAccessListResponse, error) { var fetchIpAccessListResponse FetchIpAccessListResponse path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &fetchIpAccessListResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, 
&fetchIpAccessListResponse) return &fetchIpAccessListResponse, err } func (a *ipAccessListsImpl) List(ctx context.Context) (*ListIpAccessListResponse, error) { var listIpAccessListResponse ListIpAccessListResponse path := "/api/2.0/ip-access-lists" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listIpAccessListResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listIpAccessListResponse) return &listIpAccessListResponse, err } func (a *ipAccessListsImpl) Replace(ctx context.Context, request ReplaceIpAccessList) error { var replaceResponse ReplaceResponse path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &replaceResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) return err } func (a *ipAccessListsImpl) Update(ctx context.Context, request UpdateIpAccessList) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err } @@ -480,74 +521,82 @@ type networkConnectivityImpl struct { func (a *networkConnectivityImpl) CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) { var networkConnectivityConfiguration NetworkConnectivityConfiguration path := 
fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &networkConnectivityConfiguration) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &networkConnectivityConfiguration) return &networkConnectivityConfiguration, err } func (a *networkConnectivityImpl) CreatePrivateEndpointRule(ctx context.Context, request CreatePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &nccAzurePrivateEndpointRule) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) return &nccAzurePrivateEndpointRule, err } func (a *networkConnectivityImpl) DeleteNetworkConnectivityConfiguration(ctx context.Context, request DeleteNetworkConnectivityConfigurationRequest) error { var deleteNetworkConnectivityConfigurationResponse DeleteNetworkConnectivityConfigurationResponse path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteNetworkConnectivityConfigurationResponse) + err := a.client.Do(ctx, http.MethodDelete, path, 
headers, queryParams, request, &deleteNetworkConnectivityConfigurationResponse) return err } func (a *networkConnectivityImpl) DeletePrivateEndpointRule(ctx context.Context, request DeletePrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &nccAzurePrivateEndpointRule) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) return &nccAzurePrivateEndpointRule, err } func (a *networkConnectivityImpl) GetNetworkConnectivityConfiguration(ctx context.Context, request GetNetworkConnectivityConfigurationRequest) (*NetworkConnectivityConfiguration, error) { var networkConnectivityConfiguration NetworkConnectivityConfiguration path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &networkConnectivityConfiguration) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &networkConnectivityConfiguration) return &networkConnectivityConfiguration, err } func (a *networkConnectivityImpl) GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) { var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", 
a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &nccAzurePrivateEndpointRule) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) return &nccAzurePrivateEndpointRule, err } func (a *networkConnectivityImpl) ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { var listNetworkConnectivityConfigurationsResponse ListNetworkConnectivityConfigurationsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listNetworkConnectivityConfigurationsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listNetworkConnectivityConfigurationsResponse) return &listNetworkConnectivityConfigurationsResponse, err } func (a *networkConnectivityImpl) ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { var listNccAzurePrivateEndpointRulesResponse ListNccAzurePrivateEndpointRulesResponse path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listNccAzurePrivateEndpointRulesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, 
request, &listNccAzurePrivateEndpointRulesResponse) return &listNccAzurePrivateEndpointRulesResponse, err } @@ -559,47 +608,52 @@ type notificationDestinationsImpl struct { func (a *notificationDestinationsImpl) Create(ctx context.Context, request CreateNotificationDestinationRequest) (*NotificationDestination, error) { var notificationDestination NotificationDestination path := "/api/2.0/notification-destinations" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &notificationDestination) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &notificationDestination) return &notificationDestination, err } func (a *notificationDestinationsImpl) Delete(ctx context.Context, request DeleteNotificationDestinationRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0/notification-destinations/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &empty) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) return err } func (a *notificationDestinationsImpl) Get(ctx context.Context, request GetNotificationDestinationRequest) (*NotificationDestination, error) { var notificationDestination NotificationDestination path := fmt.Sprintf("/api/2.0/notification-destinations/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &notificationDestination) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &notificationDestination) return &notificationDestination, err } func (a *notificationDestinationsImpl) List(ctx context.Context, request 
ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { var listNotificationDestinationsResponse ListNotificationDestinationsResponse path := "/api/2.0/notification-destinations" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listNotificationDestinationsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listNotificationDestinationsResponse) return &listNotificationDestinationsResponse, err } func (a *notificationDestinationsImpl) Update(ctx context.Context, request UpdateNotificationDestinationRequest) (*NotificationDestination, error) { var notificationDestination NotificationDestination path := fmt.Sprintf("/api/2.0/notification-destinations/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &notificationDestination) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &notificationDestination) return &notificationDestination, err } @@ -611,28 +665,31 @@ type personalComputeImpl struct { func (a *personalComputeImpl) Delete(ctx context.Context, request DeletePersonalComputeSettingRequest) (*DeletePersonalComputeSettingResponse, error) { var deletePersonalComputeSettingResponse DeletePersonalComputeSettingResponse path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deletePersonalComputeSettingResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, 
&deletePersonalComputeSettingResponse) return &deletePersonalComputeSettingResponse, err } func (a *personalComputeImpl) Get(ctx context.Context, request GetPersonalComputeSettingRequest) (*PersonalComputeSetting, error) { var personalComputeSetting PersonalComputeSetting path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &personalComputeSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &personalComputeSetting) return &personalComputeSetting, err } func (a *personalComputeImpl) Update(ctx context.Context, request UpdatePersonalComputeSettingRequest) (*PersonalComputeSetting, error) { var personalComputeSetting PersonalComputeSetting path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/dcp_acct_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &personalComputeSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &personalComputeSetting) return &personalComputeSetting, err } @@ -644,28 +701,31 @@ type restrictWorkspaceAdminsImpl struct { func (a *restrictWorkspaceAdminsImpl) Delete(ctx context.Context, request DeleteRestrictWorkspaceAdminsSettingRequest) (*DeleteRestrictWorkspaceAdminsSettingResponse, error) { var deleteRestrictWorkspaceAdminsSettingResponse DeleteRestrictWorkspaceAdminsSettingResponse path := "/api/2.0/settings/types/restrict_workspace_admins/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, 
http.MethodDelete, path, headers, request, &deleteRestrictWorkspaceAdminsSettingResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteRestrictWorkspaceAdminsSettingResponse) return &deleteRestrictWorkspaceAdminsSettingResponse, err } func (a *restrictWorkspaceAdminsImpl) Get(ctx context.Context, request GetRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { var restrictWorkspaceAdminsSetting RestrictWorkspaceAdminsSetting path := "/api/2.0/settings/types/restrict_workspace_admins/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &restrictWorkspaceAdminsSetting) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &restrictWorkspaceAdminsSetting) return &restrictWorkspaceAdminsSetting, err } func (a *restrictWorkspaceAdminsImpl) Update(ctx context.Context, request UpdateRestrictWorkspaceAdminsSettingRequest) (*RestrictWorkspaceAdminsSetting, error) { var restrictWorkspaceAdminsSetting RestrictWorkspaceAdminsSetting path := "/api/2.0/settings/types/restrict_workspace_admins/names/default" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &restrictWorkspaceAdminsSetting) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &restrictWorkspaceAdminsSetting) return &restrictWorkspaceAdminsSetting, err } @@ -682,75 +742,83 @@ type tokenManagementImpl struct { func (a *tokenManagementImpl) CreateOboToken(ctx context.Context, request CreateOboTokenRequest) (*CreateOboTokenResponse, error) { var createOboTokenResponse CreateOboTokenResponse path := "/api/2.0/token-management/on-behalf-of/tokens" + queryParams := make(map[string]any) 
headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createOboTokenResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createOboTokenResponse) return &createOboTokenResponse, err } func (a *tokenManagementImpl) Delete(ctx context.Context, request DeleteTokenManagementRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/token-management/tokens/%v", request.TokenId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *tokenManagementImpl) Get(ctx context.Context, request GetTokenManagementRequest) (*GetTokenResponse, error) { var getTokenResponse GetTokenResponse path := fmt.Sprintf("/api/2.0/token-management/tokens/%v", request.TokenId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getTokenResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getTokenResponse) return &getTokenResponse, err } func (a *tokenManagementImpl) GetPermissionLevels(ctx context.Context) (*GetTokenPermissionLevelsResponse, error) { var getTokenPermissionLevelsResponse GetTokenPermissionLevelsResponse path := "/api/2.0/permissions/authorization/tokens/permissionLevels" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &getTokenPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getTokenPermissionLevelsResponse) return 
&getTokenPermissionLevelsResponse, err } func (a *tokenManagementImpl) GetPermissions(ctx context.Context) (*TokenPermissions, error) { var tokenPermissions TokenPermissions path := "/api/2.0/permissions/authorization/tokens" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &tokenPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &tokenPermissions) return &tokenPermissions, err } func (a *tokenManagementImpl) List(ctx context.Context, request ListTokenManagementRequest) (*ListTokensResponse, error) { var listTokensResponse ListTokensResponse path := "/api/2.0/token-management/tokens" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listTokensResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listTokensResponse) return &listTokensResponse, err } func (a *tokenManagementImpl) SetPermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { var tokenPermissions TokenPermissions path := "/api/2.0/permissions/authorization/tokens" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &tokenPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &tokenPermissions) return &tokenPermissions, err } func (a *tokenManagementImpl) UpdatePermissions(ctx context.Context, request TokenPermissionsRequest) (*TokenPermissions, error) { var tokenPermissions TokenPermissions path := "/api/2.0/permissions/authorization/tokens" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = 
"application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &tokenPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &tokenPermissions) return &tokenPermissions, err } @@ -762,29 +830,32 @@ type tokensImpl struct { func (a *tokensImpl) Create(ctx context.Context, request CreateTokenRequest) (*CreateTokenResponse, error) { var createTokenResponse CreateTokenResponse path := "/api/2.0/token/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createTokenResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createTokenResponse) return &createTokenResponse, err } func (a *tokensImpl) Delete(ctx context.Context, request RevokeTokenRequest) error { var revokeTokenResponse RevokeTokenResponse path := "/api/2.0/token/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &revokeTokenResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &revokeTokenResponse) return err } func (a *tokensImpl) List(ctx context.Context) (*ListPublicTokensResponse, error) { var listPublicTokensResponse ListPublicTokensResponse path := "/api/2.0/token/list" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listPublicTokensResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listPublicTokensResponse) return &listPublicTokensResponse, err } @@ -796,17 +867,19 @@ type workspaceConfImpl struct { func (a *workspaceConfImpl) GetStatus(ctx context.Context, request GetStatusRequest) 
(*map[string]string, error) { var workspaceConf map[string]string path := "/api/2.0/workspace-conf" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &workspaceConf) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspaceConf) return &workspaceConf, err } func (a *workspaceConfImpl) SetStatus(ctx context.Context, request WorkspaceConf) error { var setStatusResponse SetStatusResponse path := "/api/2.0/workspace-conf" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &setStatusResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &setStatusResponse) return err } diff --git a/service/sharing/impl.go b/service/sharing/impl.go index 8eb8b3378..c6e2bccb6 100755 --- a/service/sharing/impl.go +++ b/service/sharing/impl.go @@ -20,56 +20,62 @@ type providersImpl struct { func (a *providersImpl) Create(ctx context.Context, request CreateProvider) (*ProviderInfo, error) { var providerInfo ProviderInfo path := "/api/2.1/unity-catalog/providers" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &providerInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &providerInfo) return &providerInfo, err } func (a *providersImpl) Delete(ctx context.Context, request DeleteProviderRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/providers/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, 
path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *providersImpl) Get(ctx context.Context, request GetProviderRequest) (*ProviderInfo, error) { var providerInfo ProviderInfo path := fmt.Sprintf("/api/2.1/unity-catalog/providers/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &providerInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &providerInfo) return &providerInfo, err } func (a *providersImpl) List(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.1/unity-catalog/providers" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listProvidersResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProvidersResponse) return &listProvidersResponse, err } func (a *providersImpl) ListShares(ctx context.Context, request ListSharesRequest) (*ListProviderSharesResponse, error) { var listProviderSharesResponse ListProviderSharesResponse path := fmt.Sprintf("/api/2.1/unity-catalog/providers/%v/shares", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listProviderSharesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProviderSharesResponse) return &listProviderSharesResponse, err } func (a *providersImpl) Update(ctx context.Context, request UpdateProvider) (*ProviderInfo, error) { var providerInfo ProviderInfo path := 
fmt.Sprintf("/api/2.1/unity-catalog/providers/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &providerInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &providerInfo) return &providerInfo, err } @@ -81,18 +87,20 @@ type recipientActivationImpl struct { func (a *recipientActivationImpl) GetActivationUrlInfo(ctx context.Context, request GetActivationUrlInfoRequest) error { var getActivationUrlInfoResponse GetActivationUrlInfoResponse path := fmt.Sprintf("/api/2.1/unity-catalog/public/data_sharing_activation_info/%v", request.ActivationUrl) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getActivationUrlInfoResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getActivationUrlInfoResponse) return err } func (a *recipientActivationImpl) RetrieveToken(ctx context.Context, request RetrieveTokenRequest) (*RetrieveTokenResponse, error) { var retrieveTokenResponse RetrieveTokenResponse path := fmt.Sprintf("/api/2.1/unity-catalog/public/data_sharing_activation/%v", request.ActivationUrl) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &retrieveTokenResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &retrieveTokenResponse) return &retrieveTokenResponse, err } @@ -104,66 +112,73 @@ type recipientsImpl struct { func (a *recipientsImpl) Create(ctx context.Context, request CreateRecipient) (*RecipientInfo, error) { var recipientInfo RecipientInfo path := "/api/2.1/unity-catalog/recipients" + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &recipientInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &recipientInfo) return &recipientInfo, err } func (a *recipientsImpl) Delete(ctx context.Context, request DeleteRecipientRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/recipients/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *recipientsImpl) Get(ctx context.Context, request GetRecipientRequest) (*RecipientInfo, error) { var recipientInfo RecipientInfo path := fmt.Sprintf("/api/2.1/unity-catalog/recipients/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &recipientInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &recipientInfo) return &recipientInfo, err } func (a *recipientsImpl) List(ctx context.Context, request ListRecipientsRequest) (*ListRecipientsResponse, error) { var listRecipientsResponse ListRecipientsResponse path := "/api/2.1/unity-catalog/recipients" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listRecipientsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listRecipientsResponse) return &listRecipientsResponse, err } func (a *recipientsImpl) RotateToken(ctx 
context.Context, request RotateRecipientToken) (*RecipientInfo, error) { var recipientInfo RecipientInfo path := fmt.Sprintf("/api/2.1/unity-catalog/recipients/%v/rotate-token", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &recipientInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &recipientInfo) return &recipientInfo, err } func (a *recipientsImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetRecipientSharePermissionsResponse, error) { var getRecipientSharePermissionsResponse GetRecipientSharePermissionsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/recipients/%v/share-permissions", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getRecipientSharePermissionsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRecipientSharePermissionsResponse) return &getRecipientSharePermissionsResponse, err } func (a *recipientsImpl) Update(ctx context.Context, request UpdateRecipient) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.1/unity-catalog/recipients/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err } @@ -175,65 +190,74 @@ type sharesImpl struct { func (a *sharesImpl) Create(ctx context.Context, request CreateShare) (*ShareInfo, error) { var shareInfo ShareInfo path := 
"/api/2.1/unity-catalog/shares" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &shareInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &shareInfo) return &shareInfo, err } func (a *sharesImpl) Delete(ctx context.Context, request DeleteShareRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *sharesImpl) Get(ctx context.Context, request GetShareRequest) (*ShareInfo, error) { var shareInfo ShareInfo path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &shareInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &shareInfo) return &shareInfo, err } func (a *sharesImpl) List(ctx context.Context, request ListSharesRequest) (*ListSharesResponse, error) { var listSharesResponse ListSharesResponse path := "/api/2.1/unity-catalog/shares" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listSharesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSharesResponse) return &listSharesResponse, err } func (a *sharesImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) 
(*catalog.PermissionsList, error) { var permissionsList catalog.PermissionsList path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v/permissions", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &permissionsList) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &permissionsList) return &permissionsList, err } func (a *sharesImpl) Update(ctx context.Context, request UpdateShare) (*ShareInfo, error) { var shareInfo ShareInfo path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v", request.Name) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &shareInfo) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &shareInfo) return &shareInfo, err } func (a *sharesImpl) UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error { var updatePermissionsResponse UpdatePermissionsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v/permissions", request.Name) + queryParams := make(map[string]any) + queryParams["max_results"] = request.MaxResults + queryParams["page_token"] = request.PageToken headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updatePermissionsResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updatePermissionsResponse) return err } diff --git a/service/sql/impl.go b/service/sql/impl.go index eb053fd97..5ab751c15 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -18,47 +18,52 @@ type alertsImpl struct { func (a *alertsImpl) Create(ctx context.Context, request 
CreateAlertRequest) (*Alert, error) { var alert Alert path := "/api/2.0/sql/alerts" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &alert) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &alert) return &alert, err } func (a *alertsImpl) Delete(ctx context.Context, request TrashAlertRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0/sql/alerts/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &empty) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) return err } func (a *alertsImpl) Get(ctx context.Context, request GetAlertRequest) (*Alert, error) { var alert Alert path := fmt.Sprintf("/api/2.0/sql/alerts/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &alert) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &alert) return &alert, err } func (a *alertsImpl) List(ctx context.Context, request ListAlertsRequest) (*ListAlertsResponse, error) { var listAlertsResponse ListAlertsResponse path := "/api/2.0/sql/alerts" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listAlertsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAlertsResponse) return &listAlertsResponse, err } func (a *alertsImpl) Update(ctx context.Context, request UpdateAlertRequest) (*Alert, error) { var alert Alert path := fmt.Sprintf("/api/2.0/sql/alerts/%v", 
request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &alert) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &alert) return &alert, err } @@ -70,47 +75,52 @@ type alertsLegacyImpl struct { func (a *alertsLegacyImpl) Create(ctx context.Context, request CreateAlert) (*LegacyAlert, error) { var legacyAlert LegacyAlert path := "/api/2.0/preview/sql/alerts" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyAlert) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyAlert) return &legacyAlert, err } func (a *alertsLegacyImpl) Delete(ctx context.Context, request DeleteAlertsLegacyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *alertsLegacyImpl) Get(ctx context.Context, request GetAlertsLegacyRequest) (*LegacyAlert, error) { var legacyAlert LegacyAlert path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &legacyAlert) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyAlert) return &legacyAlert, err } func (a *alertsLegacyImpl) List(ctx 
context.Context) ([]LegacyAlert, error) { var legacyAlertList []LegacyAlert path := "/api/2.0/preview/sql/alerts" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &legacyAlertList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &legacyAlertList) return legacyAlertList, err } func (a *alertsLegacyImpl) Update(ctx context.Context, request EditAlert) error { var updateResponse UpdateResponse path := fmt.Sprintf("/api/2.0/preview/sql/alerts/%v", request.AlertId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &updateResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err } @@ -122,29 +132,32 @@ type dashboardWidgetsImpl struct { func (a *dashboardWidgetsImpl) Create(ctx context.Context, request CreateWidget) (*Widget, error) { var widget Widget path := "/api/2.0/preview/sql/widgets" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &widget) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &widget) return &widget, err } func (a *dashboardWidgetsImpl) Delete(ctx context.Context, request DeleteDashboardWidgetRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/sql/widgets/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func 
(a *dashboardWidgetsImpl) Update(ctx context.Context, request CreateWidget) (*Widget, error) { var widget Widget path := fmt.Sprintf("/api/2.0/preview/sql/widgets/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &widget) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &widget) return &widget, err } @@ -156,56 +169,62 @@ type dashboardsImpl struct { func (a *dashboardsImpl) Create(ctx context.Context, request DashboardPostContent) (*Dashboard, error) { var dashboard Dashboard path := "/api/2.0/preview/sql/dashboards" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &dashboard) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &dashboard) return &dashboard, err } func (a *dashboardsImpl) Delete(ctx context.Context, request DeleteDashboardRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/sql/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *dashboardsImpl) Get(ctx context.Context, request GetDashboardRequest) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0/preview/sql/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, 
&dashboard) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &dashboard) return &dashboard, err } func (a *dashboardsImpl) List(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error) { var listResponse ListResponse path := "/api/2.0/preview/sql/dashboards" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listResponse) return &listResponse, err } func (a *dashboardsImpl) Restore(ctx context.Context, request RestoreDashboardRequest) error { var restoreResponse RestoreResponse path := fmt.Sprintf("/api/2.0/preview/sql/dashboards/trash/%v", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &restoreResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &restoreResponse) return err } func (a *dashboardsImpl) Update(ctx context.Context, request DashboardEditContent) (*Dashboard, error) { var dashboard Dashboard path := fmt.Sprintf("/api/2.0/preview/sql/dashboards/%v", request.DashboardId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &dashboard) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &dashboard) return &dashboard, err } @@ -217,9 +236,10 @@ type dataSourcesImpl struct { func (a *dataSourcesImpl) List(ctx context.Context) ([]DataSource, error) { var dataSourceList []DataSource path := "/api/2.0/preview/sql/data_sources" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := 
a.client.Do(ctx, http.MethodGet, path, headers, nil, &dataSourceList) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &dataSourceList) return dataSourceList, err } @@ -231,29 +251,32 @@ type dbsqlPermissionsImpl struct { func (a *dbsqlPermissionsImpl) Get(ctx context.Context, request GetDbsqlPermissionRequest) (*GetResponse, error) { var getResponse GetResponse path := fmt.Sprintf("/api/2.0/preview/sql/permissions/%v/%v", request.ObjectType, request.ObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getResponse) return &getResponse, err } func (a *dbsqlPermissionsImpl) Set(ctx context.Context, request SetRequest) (*SetResponse, error) { var setResponse SetResponse path := fmt.Sprintf("/api/2.0/preview/sql/permissions/%v/%v", request.ObjectType, request.ObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &setResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &setResponse) return &setResponse, err } func (a *dbsqlPermissionsImpl) TransferOwnership(ctx context.Context, request TransferOwnershipRequest) (*Success, error) { var success Success path := fmt.Sprintf("/api/2.0/preview/sql/permissions/%v/%v/transfer", request.ObjectType, request.ObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &success) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &success) return &success, err } @@ -265,56 
+288,62 @@ type queriesImpl struct { func (a *queriesImpl) Create(ctx context.Context, request CreateQueryRequest) (*Query, error) { var query Query path := "/api/2.0/sql/queries" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &query) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &query) return &query, err } func (a *queriesImpl) Delete(ctx context.Context, request TrashQueryRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0/sql/queries/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &empty) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) return err } func (a *queriesImpl) Get(ctx context.Context, request GetQueryRequest) (*Query, error) { var query Query path := fmt.Sprintf("/api/2.0/sql/queries/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &query) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &query) return &query, err } func (a *queriesImpl) List(ctx context.Context, request ListQueriesRequest) (*ListQueryObjectsResponse, error) { var listQueryObjectsResponse ListQueryObjectsResponse path := "/api/2.0/sql/queries" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listQueryObjectsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQueryObjectsResponse) return &listQueryObjectsResponse, err } func (a 
*queriesImpl) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { var listVisualizationsForQueryResponse ListVisualizationsForQueryResponse path := fmt.Sprintf("/api/2.0/sql/queries/%v/visualizations", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listVisualizationsForQueryResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listVisualizationsForQueryResponse) return &listVisualizationsForQueryResponse, err } func (a *queriesImpl) Update(ctx context.Context, request UpdateQueryRequest) (*Query, error) { var query Query path := fmt.Sprintf("/api/2.0/sql/queries/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &query) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &query) return &query, err } @@ -326,56 +355,62 @@ type queriesLegacyImpl struct { func (a *queriesLegacyImpl) Create(ctx context.Context, request QueryPostContent) (*LegacyQuery, error) { var legacyQuery LegacyQuery path := "/api/2.0/preview/sql/queries" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyQuery) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) return &legacyQuery, err } func (a *queriesLegacyImpl) Delete(ctx context.Context, request DeleteQueriesLegacyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/sql/queries/%v", request.QueryId) + queryParams := 
make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *queriesLegacyImpl) Get(ctx context.Context, request GetQueriesLegacyRequest) (*LegacyQuery, error) { var legacyQuery LegacyQuery path := fmt.Sprintf("/api/2.0/preview/sql/queries/%v", request.QueryId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &legacyQuery) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &legacyQuery) return &legacyQuery, err } func (a *queriesLegacyImpl) List(ctx context.Context, request ListQueriesLegacyRequest) (*QueryList, error) { var queryList QueryList path := "/api/2.0/preview/sql/queries" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &queryList) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &queryList) return &queryList, err } func (a *queriesLegacyImpl) Restore(ctx context.Context, request RestoreQueriesLegacyRequest) error { var restoreResponse RestoreResponse path := fmt.Sprintf("/api/2.0/preview/sql/queries/trash/%v", request.QueryId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &restoreResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &restoreResponse) return err } func (a *queriesLegacyImpl) Update(ctx context.Context, request QueryEditContent) (*LegacyQuery, error) { var legacyQuery LegacyQuery path := fmt.Sprintf("/api/2.0/preview/sql/queries/%v", 
request.QueryId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyQuery) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyQuery) return &legacyQuery, err } @@ -387,9 +422,10 @@ type queryHistoryImpl struct { func (a *queryHistoryImpl) List(ctx context.Context, request ListQueryHistoryRequest) (*ListQueriesResponse, error) { var listQueriesResponse ListQueriesResponse path := "/api/2.0/sql/history/queries" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listQueriesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listQueriesResponse) return &listQueriesResponse, err } @@ -401,29 +437,32 @@ type queryVisualizationsImpl struct { func (a *queryVisualizationsImpl) Create(ctx context.Context, request CreateVisualizationRequest) (*Visualization, error) { var visualization Visualization path := "/api/2.0/sql/visualizations" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &visualization) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &visualization) return &visualization, err } func (a *queryVisualizationsImpl) Delete(ctx context.Context, request DeleteVisualizationRequest) error { var empty Empty path := fmt.Sprintf("/api/2.0/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &empty) + err := a.client.Do(ctx, 
http.MethodDelete, path, headers, queryParams, request, &empty) return err } func (a *queryVisualizationsImpl) Update(ctx context.Context, request UpdateVisualizationRequest) (*Visualization, error) { var visualization Visualization path := fmt.Sprintf("/api/2.0/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &visualization) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &visualization) return &visualization, err } @@ -435,29 +474,32 @@ type queryVisualizationsLegacyImpl struct { func (a *queryVisualizationsLegacyImpl) Create(ctx context.Context, request CreateQueryVisualizationsLegacyRequest) (*LegacyVisualization, error) { var legacyVisualization LegacyVisualization path := "/api/2.0/preview/sql/visualizations" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyVisualization) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyVisualization) return &legacyVisualization, err } func (a *queryVisualizationsLegacyImpl) Delete(ctx context.Context, request DeleteQueryVisualizationsLegacyRequest) error { var deleteResponse DeleteResponse path := fmt.Sprintf("/api/2.0/preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } func (a *queryVisualizationsLegacyImpl) Update(ctx context.Context, request LegacyVisualization) 
(*LegacyVisualization, error) { var legacyVisualization LegacyVisualization path := fmt.Sprintf("/api/2.0/preview/sql/visualizations/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &legacyVisualization) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &legacyVisualization) return &legacyVisualization, err } @@ -469,36 +511,40 @@ type statementExecutionImpl struct { func (a *statementExecutionImpl) CancelExecution(ctx context.Context, request CancelExecutionRequest) error { var cancelExecutionResponse CancelExecutionResponse path := fmt.Sprintf("/api/2.0/sql/statements/%v/cancel", request.StatementId) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &cancelExecutionResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &cancelExecutionResponse) return err } func (a *statementExecutionImpl) ExecuteStatement(ctx context.Context, request ExecuteStatementRequest) (*StatementResponse, error) { var statementResponse StatementResponse path := "/api/2.0/sql/statements/" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &statementResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &statementResponse) return &statementResponse, err } func (a *statementExecutionImpl) GetStatement(ctx context.Context, request GetStatementRequest) (*StatementResponse, error) { var statementResponse StatementResponse path := fmt.Sprintf("/api/2.0/sql/statements/%v", request.StatementId) + queryParams := make(map[string]any) headers := 
make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &statementResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &statementResponse) return &statementResponse, err } func (a *statementExecutionImpl) GetStatementResultChunkN(ctx context.Context, request GetStatementResultChunkNRequest) (*ResultData, error) { var resultData ResultData path := fmt.Sprintf("/api/2.0/sql/statements/%v/result/chunks/%v", request.StatementId, request.ChunkIndex) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &resultData) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &resultData) return &resultData, err } @@ -510,121 +556,134 @@ type warehousesImpl struct { func (a *warehousesImpl) Create(ctx context.Context, request CreateWarehouseRequest) (*CreateWarehouseResponse, error) { var createWarehouseResponse CreateWarehouseResponse path := "/api/2.0/sql/warehouses" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createWarehouseResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createWarehouseResponse) return &createWarehouseResponse, err } func (a *warehousesImpl) Delete(ctx context.Context, request DeleteWarehouseRequest) error { var deleteWarehouseResponse DeleteWarehouseResponse path := fmt.Sprintf("/api/2.0/sql/warehouses/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteWarehouseResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, 
queryParams, request, &deleteWarehouseResponse) return err } func (a *warehousesImpl) Edit(ctx context.Context, request EditWarehouseRequest) error { var editWarehouseResponse EditWarehouseResponse path := fmt.Sprintf("/api/2.0/sql/warehouses/%v/edit", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &editWarehouseResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &editWarehouseResponse) return err } func (a *warehousesImpl) Get(ctx context.Context, request GetWarehouseRequest) (*GetWarehouseResponse, error) { var getWarehouseResponse GetWarehouseResponse path := fmt.Sprintf("/api/2.0/sql/warehouses/%v", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getWarehouseResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getWarehouseResponse) return &getWarehouseResponse, err } func (a *warehousesImpl) GetPermissionLevels(ctx context.Context, request GetWarehousePermissionLevelsRequest) (*GetWarehousePermissionLevelsResponse, error) { var getWarehousePermissionLevelsResponse GetWarehousePermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/warehouses/%v/permissionLevels", request.WarehouseId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getWarehousePermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getWarehousePermissionLevelsResponse) return &getWarehousePermissionLevelsResponse, err } func (a *warehousesImpl) GetPermissions(ctx context.Context, request 
GetWarehousePermissionsRequest) (*WarehousePermissions, error) { var warehousePermissions WarehousePermissions path := fmt.Sprintf("/api/2.0/permissions/warehouses/%v", request.WarehouseId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &warehousePermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &warehousePermissions) return &warehousePermissions, err } func (a *warehousesImpl) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetWorkspaceWarehouseConfigResponse, error) { var getWorkspaceWarehouseConfigResponse GetWorkspaceWarehouseConfigResponse path := "/api/2.0/sql/config/warehouses" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &getWorkspaceWarehouseConfigResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &getWorkspaceWarehouseConfigResponse) return &getWorkspaceWarehouseConfigResponse, err } func (a *warehousesImpl) List(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error) { var listWarehousesResponse ListWarehousesResponse path := "/api/2.0/sql/warehouses" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listWarehousesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listWarehousesResponse) return &listWarehousesResponse, err } func (a *warehousesImpl) SetPermissions(ctx context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { var warehousePermissions WarehousePermissions path := fmt.Sprintf("/api/2.0/permissions/warehouses/%v", request.WarehouseId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &warehousePermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &warehousePermissions) return &warehousePermissions, err } func (a *warehousesImpl) SetWorkspaceWarehouseConfig(ctx context.Context, request SetWorkspaceWarehouseConfigRequest) error { var setWorkspaceWarehouseConfigResponse SetWorkspaceWarehouseConfigResponse path := "/api/2.0/sql/config/warehouses" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &setWorkspaceWarehouseConfigResponse) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &setWorkspaceWarehouseConfigResponse) return err } func (a *warehousesImpl) Start(ctx context.Context, request StartRequest) error { var startWarehouseResponse StartWarehouseResponse path := fmt.Sprintf("/api/2.0/sql/warehouses/%v/start", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &startWarehouseResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &startWarehouseResponse) return err } func (a *warehousesImpl) Stop(ctx context.Context, request StopRequest) error { var stopWarehouseResponse StopWarehouseResponse path := fmt.Sprintf("/api/2.0/sql/warehouses/%v/stop", request.Id) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &stopWarehouseResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &stopWarehouseResponse) return err } func (a *warehousesImpl) UpdatePermissions(ctx 
context.Context, request WarehousePermissionsRequest) (*WarehousePermissions, error) { var warehousePermissions WarehousePermissions path := fmt.Sprintf("/api/2.0/permissions/warehouses/%v", request.WarehouseId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &warehousePermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &warehousePermissions) return &warehousePermissions, err } diff --git a/service/vectorsearch/impl.go b/service/vectorsearch/impl.go index 92563db06..1e69a9afe 100755 --- a/service/vectorsearch/impl.go +++ b/service/vectorsearch/impl.go @@ -18,36 +18,40 @@ type vectorSearchEndpointsImpl struct { func (a *vectorSearchEndpointsImpl) CreateEndpoint(ctx context.Context, request CreateEndpoint) (*EndpointInfo, error) { var endpointInfo EndpointInfo path := "/api/2.0/vector-search/endpoints" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &endpointInfo) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &endpointInfo) return &endpointInfo, err } func (a *vectorSearchEndpointsImpl) DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error { var deleteEndpointResponse DeleteEndpointResponse path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v", request.EndpointName) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteEndpointResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteEndpointResponse) return err } func (a *vectorSearchEndpointsImpl) GetEndpoint(ctx context.Context, request 
GetEndpointRequest) (*EndpointInfo, error) { var endpointInfo EndpointInfo path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v", request.EndpointName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &endpointInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &endpointInfo) return &endpointInfo, err } func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) { var listEndpointResponse ListEndpointResponse path := "/api/2.0/vector-search/endpoints" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listEndpointResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listEndpointResponse) return &listEndpointResponse, err } @@ -59,93 +63,103 @@ type vectorSearchIndexesImpl struct { func (a *vectorSearchIndexesImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) { var createVectorIndexResponse CreateVectorIndexResponse path := "/api/2.0/vector-search/indexes" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createVectorIndexResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createVectorIndexResponse) return &createVectorIndexResponse, err } func (a *vectorSearchIndexesImpl) DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) { var deleteDataVectorIndexResponse DeleteDataVectorIndexResponse path := 
fmt.Sprintf("/api/2.0/vector-search/indexes/%v/delete-data", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteDataVectorIndexResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteDataVectorIndexResponse) return &deleteDataVectorIndexResponse, err } func (a *vectorSearchIndexesImpl) DeleteIndex(ctx context.Context, request DeleteIndexRequest) error { var deleteIndexResponse DeleteIndexResponse path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteIndexResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteIndexResponse) return err } func (a *vectorSearchIndexesImpl) GetIndex(ctx context.Context, request GetIndexRequest) (*VectorIndex, error) { var vectorIndex VectorIndex path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &vectorIndex) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &vectorIndex) return &vectorIndex, err } func (a *vectorSearchIndexesImpl) ListIndexes(ctx context.Context, request ListIndexesRequest) (*ListVectorIndexesResponse, error) { var listVectorIndexesResponse ListVectorIndexesResponse path := "/api/2.0/vector-search/indexes" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listVectorIndexesResponse) + err := a.client.Do(ctx, 
http.MethodGet, path, headers, queryParams, request, &listVectorIndexesResponse) return &listVectorIndexesResponse, err } func (a *vectorSearchIndexesImpl) QueryIndex(ctx context.Context, request QueryVectorIndexRequest) (*QueryVectorIndexResponse, error) { var queryVectorIndexResponse QueryVectorIndexResponse path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/query", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &queryVectorIndexResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryVectorIndexResponse) return &queryVectorIndexResponse, err } func (a *vectorSearchIndexesImpl) QueryNextPage(ctx context.Context, request QueryVectorIndexNextPageRequest) (*QueryVectorIndexResponse, error) { var queryVectorIndexResponse QueryVectorIndexResponse path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/query-next-page", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &queryVectorIndexResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &queryVectorIndexResponse) return &queryVectorIndexResponse, err } func (a *vectorSearchIndexesImpl) ScanIndex(ctx context.Context, request ScanVectorIndexRequest) (*ScanVectorIndexResponse, error) { var scanVectorIndexResponse ScanVectorIndexResponse path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/scan", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &scanVectorIndexResponse) 
+ err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &scanVectorIndexResponse) return &scanVectorIndexResponse, err } func (a *vectorSearchIndexesImpl) SyncIndex(ctx context.Context, request SyncIndexRequest) error { var syncIndexResponse SyncIndexResponse path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/sync", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) - err := a.client.Do(ctx, http.MethodPost, path, headers, nil, &syncIndexResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &syncIndexResponse) return err } func (a *vectorSearchIndexesImpl) UpsertDataVectorIndex(ctx context.Context, request UpsertDataVectorIndexRequest) (*UpsertDataVectorIndexResponse, error) { var upsertDataVectorIndexResponse UpsertDataVectorIndexResponse path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/upsert-data", request.IndexName) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &upsertDataVectorIndexResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &upsertDataVectorIndexResponse) return &upsertDataVectorIndexResponse, err } diff --git a/service/workspace/ext_utilities.go b/service/workspace/ext_utilities.go index d55053c3f..f851279e3 100644 --- a/service/workspace/ext_utilities.go +++ b/service/workspace/ext_utilities.go @@ -207,7 +207,7 @@ func (a *WorkspaceAPI) Upload(ctx context.Context, path string, r io.Reader, opt headers := map[string]string{ "Content-Type": w.FormDataContentType(), } - return a.workspaceImpl.client.Do(ctx, "POST", "/api/2.0/workspace/import", headers, buf.Bytes(), nil) + return a.workspaceImpl.client.Do(ctx, "POST", "/api/2.0/workspace/import", headers, nil, buf.Bytes(), nil) } // WriteFile is identical to [os.WriteFile] but for 
Workspace File. @@ -241,7 +241,7 @@ func (a *WorkspaceAPI) Download(ctx context.Context, path string, opts ...Downlo v(query) } headers := map[string]string{"Content-Type": "application/json"} - err := a.workspaceImpl.client.Do(ctx, "GET", "/api/2.0/workspace/export", headers, query, &buf) + err := a.workspaceImpl.client.Do(ctx, "GET", "/api/2.0/workspace/export", headers, nil, query, &buf) if err != nil { return nil, err } diff --git a/service/workspace/impl.go b/service/workspace/impl.go index 4542d181a..63af14521 100755 --- a/service/workspace/impl.go +++ b/service/workspace/impl.go @@ -18,47 +18,52 @@ type gitCredentialsImpl struct { func (a *gitCredentialsImpl) Create(ctx context.Context, request CreateCredentialsRequest) (*CreateCredentialsResponse, error) { var createCredentialsResponse CreateCredentialsResponse path := "/api/2.0/git-credentials" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createCredentialsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createCredentialsResponse) return &createCredentialsResponse, err } func (a *gitCredentialsImpl) Delete(ctx context.Context, request DeleteCredentialsRequest) error { var deleteCredentialsResponse DeleteCredentialsResponse path := fmt.Sprintf("/api/2.0/git-credentials/%v", request.CredentialId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteCredentialsResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteCredentialsResponse) return err } func (a *gitCredentialsImpl) Get(ctx context.Context, request GetCredentialsRequest) (*GetCredentialsResponse, error) { var getCredentialsResponse GetCredentialsResponse 
path := fmt.Sprintf("/api/2.0/git-credentials/%v", request.CredentialId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getCredentialsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCredentialsResponse) return &getCredentialsResponse, err } func (a *gitCredentialsImpl) List(ctx context.Context) (*ListCredentialsResponse, error) { var listCredentialsResponse ListCredentialsResponse path := "/api/2.0/git-credentials" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listCredentialsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listCredentialsResponse) return &listCredentialsResponse, err } func (a *gitCredentialsImpl) Update(ctx context.Context, request UpdateCredentialsRequest) error { var updateCredentialsResponse UpdateCredentialsResponse path := fmt.Sprintf("/api/2.0/git-credentials/%v", request.CredentialId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateCredentialsResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateCredentialsResponse) return err } @@ -70,85 +75,94 @@ type reposImpl struct { func (a *reposImpl) Create(ctx context.Context, request CreateRepoRequest) (*CreateRepoResponse, error) { var createRepoResponse CreateRepoResponse path := "/api/2.0/repos" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createRepoResponse) + err := a.client.Do(ctx, http.MethodPost, 
path, headers, queryParams, request, &createRepoResponse) return &createRepoResponse, err } func (a *reposImpl) Delete(ctx context.Context, request DeleteRepoRequest) error { var deleteRepoResponse DeleteRepoResponse path := fmt.Sprintf("/api/2.0/repos/%v", request.RepoId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodDelete, path, headers, request, &deleteRepoResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteRepoResponse) return err } func (a *reposImpl) Get(ctx context.Context, request GetRepoRequest) (*GetRepoResponse, error) { var getRepoResponse GetRepoResponse path := fmt.Sprintf("/api/2.0/repos/%v", request.RepoId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getRepoResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRepoResponse) return &getRepoResponse, err } func (a *reposImpl) GetPermissionLevels(ctx context.Context, request GetRepoPermissionLevelsRequest) (*GetRepoPermissionLevelsResponse, error) { var getRepoPermissionLevelsResponse GetRepoPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/repos/%v/permissionLevels", request.RepoId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getRepoPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getRepoPermissionLevelsResponse) return &getRepoPermissionLevelsResponse, err } func (a *reposImpl) GetPermissions(ctx context.Context, request GetRepoPermissionsRequest) (*RepoPermissions, error) { var repoPermissions RepoPermissions path := fmt.Sprintf("/api/2.0/permissions/repos/%v", 
request.RepoId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &repoPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &repoPermissions) return &repoPermissions, err } func (a *reposImpl) List(ctx context.Context, request ListReposRequest) (*ListReposResponse, error) { var listReposResponse ListReposResponse path := "/api/2.0/repos" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listReposResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listReposResponse) return &listReposResponse, err } func (a *reposImpl) SetPermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { var repoPermissions RepoPermissions path := fmt.Sprintf("/api/2.0/permissions/repos/%v", request.RepoId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &repoPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &repoPermissions) return &repoPermissions, err } func (a *reposImpl) Update(ctx context.Context, request UpdateRepoRequest) error { var updateRepoResponse UpdateRepoResponse path := fmt.Sprintf("/api/2.0/repos/%v", request.RepoId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &updateRepoResponse) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateRepoResponse) return err } func (a *reposImpl) 
UpdatePermissions(ctx context.Context, request RepoPermissionsRequest) (*RepoPermissions, error) { var repoPermissions RepoPermissions path := fmt.Sprintf("/api/2.0/permissions/repos/%v", request.RepoId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &repoPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &repoPermissions) return &repoPermissions, err } @@ -160,105 +174,116 @@ type secretsImpl struct { func (a *secretsImpl) CreateScope(ctx context.Context, request CreateScope) error { var createScopeResponse CreateScopeResponse path := "/api/2.0/secrets/scopes/create" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &createScopeResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createScopeResponse) return err } func (a *secretsImpl) DeleteAcl(ctx context.Context, request DeleteAcl) error { var deleteAclResponse DeleteAclResponse path := "/api/2.0/secrets/acls/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteAclResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteAclResponse) return err } func (a *secretsImpl) DeleteScope(ctx context.Context, request DeleteScope) error { var deleteScopeResponse DeleteScopeResponse path := "/api/2.0/secrets/scopes/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" 
- err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteScopeResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteScopeResponse) return err } func (a *secretsImpl) DeleteSecret(ctx context.Context, request DeleteSecret) error { var deleteSecretResponse DeleteSecretResponse path := "/api/2.0/secrets/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteSecretResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteSecretResponse) return err } func (a *secretsImpl) GetAcl(ctx context.Context, request GetAclRequest) (*AclItem, error) { var aclItem AclItem path := "/api/2.0/secrets/acls/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &aclItem) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &aclItem) return &aclItem, err } func (a *secretsImpl) GetSecret(ctx context.Context, request GetSecretRequest) (*GetSecretResponse, error) { var getSecretResponse GetSecretResponse path := "/api/2.0/secrets/get" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getSecretResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getSecretResponse) return &getSecretResponse, err } func (a *secretsImpl) ListAcls(ctx context.Context, request ListAclsRequest) (*ListAclsResponse, error) { var listAclsResponse ListAclsResponse path := "/api/2.0/secrets/acls/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := 
a.client.Do(ctx, http.MethodGet, path, headers, request, &listAclsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAclsResponse) return &listAclsResponse, err } func (a *secretsImpl) ListScopes(ctx context.Context) (*ListScopesResponse, error) { var listScopesResponse ListScopesResponse path := "/api/2.0/secrets/scopes/list" + headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, nil, &listScopesResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &listScopesResponse) return &listScopesResponse, err } func (a *secretsImpl) ListSecrets(ctx context.Context, request ListSecretsRequest) (*ListSecretsResponse, error) { var listSecretsResponse ListSecretsResponse path := "/api/2.0/secrets/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &listSecretsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listSecretsResponse) return &listSecretsResponse, err } func (a *secretsImpl) PutAcl(ctx context.Context, request PutAcl) error { var putAclResponse PutAclResponse path := "/api/2.0/secrets/acls/put" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &putAclResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &putAclResponse) return err } func (a *secretsImpl) PutSecret(ctx context.Context, request PutSecret) error { var putSecretResponse PutSecretResponse path := "/api/2.0/secrets/put" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := 
a.client.Do(ctx, http.MethodPost, path, headers, request, &putSecretResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &putSecretResponse) return err } @@ -270,94 +295,104 @@ type workspaceImpl struct { func (a *workspaceImpl) Delete(ctx context.Context, request Delete) error { var deleteResponse DeleteResponse path := "/api/2.0/workspace/delete" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &deleteResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteResponse) return err } func (a *workspaceImpl) Export(ctx context.Context, request ExportRequest) (*ExportResponse, error) { var exportResponse ExportResponse path := "/api/2.0/workspace/export" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &exportResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &exportResponse) return &exportResponse, err } func (a *workspaceImpl) GetPermissionLevels(ctx context.Context, request GetWorkspaceObjectPermissionLevelsRequest) (*GetWorkspaceObjectPermissionLevelsResponse, error) { var getWorkspaceObjectPermissionLevelsResponse GetWorkspaceObjectPermissionLevelsResponse path := fmt.Sprintf("/api/2.0/permissions/%v/%v/permissionLevels", request.WorkspaceObjectType, request.WorkspaceObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &getWorkspaceObjectPermissionLevelsResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getWorkspaceObjectPermissionLevelsResponse) return 
&getWorkspaceObjectPermissionLevelsResponse, err } func (a *workspaceImpl) GetPermissions(ctx context.Context, request GetWorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { var workspaceObjectPermissions WorkspaceObjectPermissions path := fmt.Sprintf("/api/2.0/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &workspaceObjectPermissions) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &workspaceObjectPermissions) return &workspaceObjectPermissions, err } func (a *workspaceImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*ObjectInfo, error) { var objectInfo ObjectInfo path := "/api/2.0/workspace/get-status" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, request, &objectInfo) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &objectInfo) return &objectInfo, err } func (a *workspaceImpl) Import(ctx context.Context, request Import) error { var importResponse ImportResponse path := "/api/2.0/workspace/import" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &importResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &importResponse) return err } func (a *workspaceImpl) List(ctx context.Context, request ListWorkspaceRequest) (*ListResponse, error) { var listResponse ListResponse path := "/api/2.0/workspace/list" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, 
http.MethodGet, path, headers, request, &listResponse) + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listResponse) return &listResponse, err } func (a *workspaceImpl) Mkdirs(ctx context.Context, request Mkdirs) error { var mkdirsResponse MkdirsResponse path := "/api/2.0/workspace/mkdirs" + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, request, &mkdirsResponse) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &mkdirsResponse) return err } func (a *workspaceImpl) SetPermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { var workspaceObjectPermissions WorkspaceObjectPermissions path := fmt.Sprintf("/api/2.0/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPut, path, headers, request, &workspaceObjectPermissions) + err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &workspaceObjectPermissions) return &workspaceObjectPermissions, err } func (a *workspaceImpl) UpdatePermissions(ctx context.Context, request WorkspaceObjectPermissionsRequest) (*WorkspaceObjectPermissions, error) { var workspaceObjectPermissions WorkspaceObjectPermissions path := fmt.Sprintf("/api/2.0/permissions/%v/%v", request.WorkspaceObjectType, request.WorkspaceObjectId) + queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, request, &workspaceObjectPermissions) + err := a.client.Do(ctx, http.MethodPatch, path, 
headers, queryParams, request, &workspaceObjectPermissions) return &workspaceObjectPermissions, err } From 113454080f34e4da04782895ea5d61101bf2b425 Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Tue, 21 Jan 2025 13:33:06 +0100 Subject: [PATCH 05/54] [Internal] Add download target to MakeFile (#1125) ## What changes are proposed in this pull request? This PR introduces a download target which is a wrapper around `go mod download`. The rationale behind introducing this target is that we can change the underlying implementation of this target for `dev/sdk-mod` branch in which there is no go.mod at the root repository so we need to either manually run download for all the go modules or introduce a make target like [this](https://github.com/databricks/databricks-sdk-go/pull/1123/files#diff-76ed074a9305c04054cdebb9e9aad2d818052b07091de1f20cad0bbac34ffb52). One could say that we can just add this target for SDK mod but we need to update integration test workflows accordingly. I think this is a better way than having an if/else statement in the workflow. ## How is this tested? Locally run `make download` --- Makefile | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/Makefile b/Makefile index adbac53c8..326fae0b0 100644 --- a/Makefile +++ b/Makefile @@ -28,8 +28,12 @@ vendor: @echo "✓ Filling vendor folder with library code ..." @go mod vendor +download: + @echo "✓ Downloading dependencies ..." + @go mod download + doc: @echo "Open http://localhost:6060" @go run golang.org/x/tools/cmd/godoc@latest -http=localhost:6060 -.PHONY: fmt vendor fmt coverage test lint doc +.PHONY: fmt vendor fmt coverage test lint doc download From 28ff749ee2271172ceda01aaaa6e997e8c2aebd7 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 22 Jan 2025 17:03:54 +0100 Subject: [PATCH 06/54] [Release] Release v0.56.0 (#1134) ### Bug Fixes * Support Query parameters for all HTTP operations ([#1124](https://github.com/databricks/databricks-sdk-go/pull/1124)). 
### Internal Changes * Add download target to MakeFile ([#1125](https://github.com/databricks/databricks-sdk-go/pull/1125)). * Delete examples/mocking module ([#1126](https://github.com/databricks/databricks-sdk-go/pull/1126)). * Scope the traversing directory in the Recursive list workspace test ([#1120](https://github.com/databricks/databricks-sdk-go/pull/1120)). ### API Changes: * Added [w.AccessControl](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/iam#AccessControlAPI) workspace-level service. * Added `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service. * Added `ReviewState`, `Reviews` and `RunnerCollaborators` fields for [cleanrooms.CleanRoomAssetNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetNotebook). * Added `CleanRoomsNotebookOutput` field for [jobs.RunOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunOutput). * Added `RunAsRepl` field for [jobs.SparkJarTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SparkJarTask). * Added `Scopes` field for [oauth2.UpdateCustomAppIntegration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateCustomAppIntegration). * Added `Contents` field for [serving.GetOpenApiResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#GetOpenApiResponse). * Added `Activated`, `ActivationUrl`, `AuthenticationType`, `Cloud`, `Comment`, `CreatedAt`, `CreatedBy`, `DataRecipientGlobalMetastoreId`, `IpAccessList`, `MetastoreId`, `Name`, `Owner`, `PropertiesKvpairs`, `Region`, `SharingCode`, `Tokens`, `UpdatedAt` and `UpdatedBy` fields for [sharing.RecipientInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientInfo). 
* Added `ExpirationTime` field for [sharing.RecipientInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientInfo). * Added `Pending` enum value for [cleanrooms.CleanRoomAssetStatusEnum](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetStatusEnum). * Added `AddNodesFailed`, `AutomaticClusterUpdate`, `AutoscalingBackoff` and `AutoscalingFailed` enum values for [compute.EventType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EventType). * Added `PendingWarehouse` enum value for [dashboards.MessageStatus](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageStatus). * Added `Cpu`, `GpuLarge`, `GpuMedium`, `GpuSmall` and `MultigpuMedium` enum values for [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType). * Changed `Update` method for [w.Recipients](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientsAPI) workspace-level service to return [sharing.RecipientInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientInfo). * Changed `Update` method for [w.Recipients](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientsAPI) workspace-level service return type to become non-empty. * Changed `Update` method for [w.Recipients](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientsAPI) workspace-level service to type `Update` method for [w.Recipients](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientsAPI) workspace-level service. * Changed `Create` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service with new required argument order. 
* Changed `GetOpenApi` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service return type to become non-empty. * Changed `Patch` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service to type `Patch` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service. * Changed `Patch` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service to return [serving.EndpointTags](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#EndpointTags). * Changed [serving.EndpointTagList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#EndpointTagList) to. * Changed `CollaboratorAlias` field for [cleanrooms.CleanRoomCollaborator](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomCollaborator) to be required. * Changed `CollaboratorAlias` field for [cleanrooms.CleanRoomCollaborator](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomCollaborator) to be required. * Changed `Behavior` field for [serving.AiGatewayGuardrailPiiBehavior](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayGuardrailPiiBehavior) to no longer be required. * Changed `Behavior` field for [serving.AiGatewayGuardrailPiiBehavior](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayGuardrailPiiBehavior) to no longer be required. * Changed `Config` field for [serving.CreateServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#CreateServingEndpoint) to no longer be required. 
* Changed `ProjectId` and `Region` fields for [serving.GoogleCloudVertexAiConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#GoogleCloudVertexAiConfig) to be required. * Changed `ProjectId` and `Region` fields for [serving.GoogleCloudVertexAiConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#GoogleCloudVertexAiConfig) to be required. * Changed `WorkloadType` field for [serving.ServedEntityInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedEntityInput) to type [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType). * Changed `WorkloadType` field for [serving.ServedEntityOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedEntityOutput) to type [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType). * Changed `WorkloadType` field for [serving.ServedModelOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelOutput) to type [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType). * Changed waiter for [ServingEndpointsAPI.Create](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI.Create). * Changed waiter for [ServingEndpointsAPI.UpdateConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI.UpdateConfig). 
OpenAPI SHA: 0be1b914249781b5e903b7676fd02255755bc851, Date: 2025-01-22 --- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + CHANGELOG.md | 53 ++ experimental/mocks/mock_workspace_client.go | 9 + .../iam/mock_access_control_interface.go | 96 ++++ .../mock_serving_endpoints_interface.go | 129 ++++- .../sharing/mock_recipients_interface.go | 87 +--- internal/sharing_test.go | 2 +- service/cleanrooms/model.go | 64 ++- service/compute/model.go | 12 +- service/dashboards/model.go | 18 +- service/files/api.go | 7 +- service/files/interface.go | 7 +- service/iam/api.go | 21 +- service/iam/impl.go | 15 + service/iam/interface.go | 7 + service/iam/model.go | 104 ++++ service/jobs/model.go | 46 +- service/oauth2/model.go | 4 + service/pkg.go | 3 + service/serving/api.go | 13 +- service/serving/impl.go | 25 +- service/serving/interface.go | 10 +- service/serving/model.go | 462 ++++++++++-------- service/sharing/api.go | 38 +- service/sharing/impl.go | 8 +- service/sharing/interface.go | 4 +- service/sharing/model.go | 60 +-- service/sharing/recipients_usage_test.go | 2 +- version/version.go | 2 +- workspace_client.go | 11 +- 31 files changed, 918 insertions(+), 404 deletions(-) create mode 100644 experimental/mocks/service/iam/mock_access_control_interface.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index dfe78790a..588cf9d63 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -779817ed8d63031f5ea761fbd25ee84f38feec0d \ No newline at end of file +0be1b914249781b5e903b7676fd02255755bc851 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 91ba6180a..1f04b9a10 100644 --- a/.gitattributes +++ b/.gitattributes @@ -49,6 +49,7 @@ experimental/mocks/service/dashboards/mock_genie_interface.go linguist-generated experimental/mocks/service/dashboards/mock_lakeview_interface.go linguist-generated=true experimental/mocks/service/files/mock_dbfs_interface.go linguist-generated=true 
experimental/mocks/service/files/mock_files_interface.go linguist-generated=true +experimental/mocks/service/iam/mock_access_control_interface.go linguist-generated=true experimental/mocks/service/iam/mock_account_access_control_interface.go linguist-generated=true experimental/mocks/service/iam/mock_account_access_control_proxy_interface.go linguist-generated=true experimental/mocks/service/iam/mock_account_groups_interface.go linguist-generated=true diff --git a/CHANGELOG.md b/CHANGELOG.md index a9036ffa8..61a339db8 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,58 @@ # Version changelog +## [Release] Release v0.56.0 + +### Bug Fixes + + * Support Query parameters for all HTTP operations ([#1124](https://github.com/databricks/databricks-sdk-go/pull/1124)). + + +### Internal Changes + + * Add download target to MakeFile ([#1125](https://github.com/databricks/databricks-sdk-go/pull/1125)). + * Delete examples/mocking module ([#1126](https://github.com/databricks/databricks-sdk-go/pull/1126)). + * Scope the traversing directory in the Recursive list workspace test ([#1120](https://github.com/databricks/databricks-sdk-go/pull/1120)). + + +### API Changes: + + * Added [w.AccessControl](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/iam#AccessControlAPI) workspace-level service. + * Added `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service. + * Added `ReviewState`, `Reviews` and `RunnerCollaborators` fields for [cleanrooms.CleanRoomAssetNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetNotebook). + * Added `CleanRoomsNotebookOutput` field for [jobs.RunOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunOutput). + * Added `RunAsRepl` field for [jobs.SparkJarTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SparkJarTask). 
+ * Added `Scopes` field for [oauth2.UpdateCustomAppIntegration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateCustomAppIntegration). + * Added `Contents` field for [serving.GetOpenApiResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#GetOpenApiResponse). + * Added `Activated`, `ActivationUrl`, `AuthenticationType`, `Cloud`, `Comment`, `CreatedAt`, `CreatedBy`, `DataRecipientGlobalMetastoreId`, `IpAccessList`, `MetastoreId`, `Name`, `Owner`, `PropertiesKvpairs`, `Region`, `SharingCode`, `Tokens`, `UpdatedAt` and `UpdatedBy` fields for [sharing.RecipientInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientInfo). + * Added `ExpirationTime` field for [sharing.RecipientInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientInfo). + * Added `Pending` enum value for [cleanrooms.CleanRoomAssetStatusEnum](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetStatusEnum). + * Added `AddNodesFailed`, `AutomaticClusterUpdate`, `AutoscalingBackoff` and `AutoscalingFailed` enum values for [compute.EventType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EventType). + * Added `PendingWarehouse` enum value for [dashboards.MessageStatus](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageStatus). + * Added `Cpu`, `GpuLarge`, `GpuMedium`, `GpuSmall` and `MultigpuMedium` enum values for [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType). + * Changed `Update` method for [w.Recipients](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientsAPI) workspace-level service to return [sharing.RecipientInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientInfo). 
+ * Changed `Update` method for [w.Recipients](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientsAPI) workspace-level service return type to become non-empty.
+ * [Breaking] Changed `Update` method for [w.Recipients](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientsAPI) workspace-level service return type from error-only to [sharing.RecipientInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#RecipientInfo).
+ * Changed `Create` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service with new required argument order.
+ * Changed `GetOpenApi` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service return type to become non-empty.
+ * [Breaking] Changed `Patch` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service return type from a list of [serving.EndpointTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#EndpointTag) to [serving.EndpointTags](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#EndpointTags).
+ * Changed `Patch` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service to return [serving.EndpointTags](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#EndpointTags).
+ * [Breaking] Changed [serving.EndpointTagList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#EndpointTagList) to [serving.EndpointTags](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#EndpointTags).
+ * Changed `CollaboratorAlias` field for [cleanrooms.CleanRoomCollaborator](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomCollaborator) to be required.
+ * [Breaking] Changed `CollaboratorAlias` field for [cleanrooms.CleanRoomCollaborator](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomCollaborator) to be required.
+ * Changed `Behavior` field for [serving.AiGatewayGuardrailPiiBehavior](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayGuardrailPiiBehavior) to no longer be required.
+ * Changed `Behavior` field for [serving.AiGatewayGuardrailPiiBehavior](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayGuardrailPiiBehavior) to no longer be required.
+ * Changed `Config` field for [serving.CreateServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#CreateServingEndpoint) to no longer be required.
+ * [Breaking] Changed `ProjectId` and `Region` fields for [serving.GoogleCloudVertexAiConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#GoogleCloudVertexAiConfig) to be required.
+ * Changed `ProjectId` and `Region` fields for [serving.GoogleCloudVertexAiConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#GoogleCloudVertexAiConfig) to be required.
+ * [Breaking] Changed `WorkloadType` field for [serving.ServedEntityInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedEntityInput) to type [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType).
+ * [Breaking] Changed `WorkloadType` field for [serving.ServedEntityOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedEntityOutput) to type [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType).
+ * Changed `WorkloadType` field for [serving.ServedModelOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelOutput) to type [serving.ServingModelWorkloadType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingModelWorkloadType). + * Changed waiter for [ServingEndpointsAPI.Create](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI.Create). + * Changed waiter for [ServingEndpointsAPI.UpdateConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI.UpdateConfig). + + +OpenAPI SHA: 0be1b914249781b5e903b7676fd02255755bc851, Date: 2025-01-22 + ## [Release] Release v0.55.0 ### Internal Changes diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index 20f8533c9..c46e8663d 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -41,6 +41,7 @@ func NewMockWorkspaceClient(t interface { WorkspaceClient: &databricks.WorkspaceClient{ Config: nil, + AccessControl: iam.NewMockAccessControlInterface(t), AccountAccessControlProxy: iam.NewMockAccountAccessControlProxyInterface(t), Alerts: sql.NewMockAlertsInterface(t), AlertsLegacy: sql.NewMockAlertsLegacyInterface(t), @@ -242,6 +243,14 @@ func (m *MockWorkspaceClient) GetMockRestrictWorkspaceAdminsAPI() *settings.Mock return api } +func (m *MockWorkspaceClient) GetMockAccessControlAPI() *iam.MockAccessControlInterface { + api, ok := m.WorkspaceClient.AccessControl.(*iam.MockAccessControlInterface) + if !ok { + panic(fmt.Sprintf("expected AccessControl to be *iam.MockAccessControlInterface, actual was %T", m.WorkspaceClient.AccessControl)) + } + return api +} + func (m *MockWorkspaceClient) GetMockAccountAccessControlProxyAPI() *iam.MockAccountAccessControlProxyInterface { api, ok := 
m.WorkspaceClient.AccountAccessControlProxy.(*iam.MockAccountAccessControlProxyInterface) if !ok { diff --git a/experimental/mocks/service/iam/mock_access_control_interface.go b/experimental/mocks/service/iam/mock_access_control_interface.go new file mode 100644 index 000000000..67324947c --- /dev/null +++ b/experimental/mocks/service/iam/mock_access_control_interface.go @@ -0,0 +1,96 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. + +package iam + +import ( + context "context" + + iam "github.com/databricks/databricks-sdk-go/service/iam" + mock "github.com/stretchr/testify/mock" +) + +// MockAccessControlInterface is an autogenerated mock type for the AccessControlInterface type +type MockAccessControlInterface struct { + mock.Mock +} + +type MockAccessControlInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockAccessControlInterface) EXPECT() *MockAccessControlInterface_Expecter { + return &MockAccessControlInterface_Expecter{mock: &_m.Mock} +} + +// CheckPolicy provides a mock function with given fields: ctx, request +func (_m *MockAccessControlInterface) CheckPolicy(ctx context.Context, request iam.CheckPolicyRequest) (*iam.CheckPolicyResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for CheckPolicy") + } + + var r0 *iam.CheckPolicyResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, iam.CheckPolicyRequest) (*iam.CheckPolicyResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, iam.CheckPolicyRequest) *iam.CheckPolicyResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*iam.CheckPolicyResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, iam.CheckPolicyRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAccessControlInterface_CheckPolicy_Call is a *mock.Call that shadows Run/Return methods with type 
explicit version for method 'CheckPolicy' +type MockAccessControlInterface_CheckPolicy_Call struct { + *mock.Call +} + +// CheckPolicy is a helper method to define mock.On call +// - ctx context.Context +// - request iam.CheckPolicyRequest +func (_e *MockAccessControlInterface_Expecter) CheckPolicy(ctx interface{}, request interface{}) *MockAccessControlInterface_CheckPolicy_Call { + return &MockAccessControlInterface_CheckPolicy_Call{Call: _e.mock.On("CheckPolicy", ctx, request)} +} + +func (_c *MockAccessControlInterface_CheckPolicy_Call) Run(run func(ctx context.Context, request iam.CheckPolicyRequest)) *MockAccessControlInterface_CheckPolicy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(iam.CheckPolicyRequest)) + }) + return _c +} + +func (_c *MockAccessControlInterface_CheckPolicy_Call) Return(_a0 *iam.CheckPolicyResponse, _a1 error) *MockAccessControlInterface_CheckPolicy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAccessControlInterface_CheckPolicy_Call) RunAndReturn(run func(context.Context, iam.CheckPolicyRequest) (*iam.CheckPolicyResponse, error)) *MockAccessControlInterface_CheckPolicy_Call { + _c.Call.Return(run) + return _c +} + +// NewMockAccessControlInterface creates a new instance of MockAccessControlInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockAccessControlInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockAccessControlInterface { + mock := &MockAccessControlInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go index 782f10dfc..84c106815 100644 --- a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go +++ b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go @@ -611,21 +611,33 @@ func (_c *MockServingEndpointsInterface_GetByName_Call) RunAndReturn(run func(co } // GetOpenApi provides a mock function with given fields: ctx, request -func (_m *MockServingEndpointsInterface) GetOpenApi(ctx context.Context, request serving.GetOpenApiRequest) error { +func (_m *MockServingEndpointsInterface) GetOpenApi(ctx context.Context, request serving.GetOpenApiRequest) (*serving.GetOpenApiResponse, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for GetOpenApi") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, serving.GetOpenApiRequest) error); ok { + var r0 *serving.GetOpenApiResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, serving.GetOpenApiRequest) (*serving.GetOpenApiResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, serving.GetOpenApiRequest) *serving.GetOpenApiResponse); ok { r0 = rf(ctx, request) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*serving.GetOpenApiResponse) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, serving.GetOpenApiRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // MockServingEndpointsInterface_GetOpenApi_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'GetOpenApi' @@ -647,32 +659,44 @@ func (_c *MockServingEndpointsInterface_GetOpenApi_Call) Run(run func(ctx contex return _c } -func (_c *MockServingEndpointsInterface_GetOpenApi_Call) Return(_a0 error) *MockServingEndpointsInterface_GetOpenApi_Call { - _c.Call.Return(_a0) +func (_c *MockServingEndpointsInterface_GetOpenApi_Call) Return(_a0 *serving.GetOpenApiResponse, _a1 error) *MockServingEndpointsInterface_GetOpenApi_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *MockServingEndpointsInterface_GetOpenApi_Call) RunAndReturn(run func(context.Context, serving.GetOpenApiRequest) error) *MockServingEndpointsInterface_GetOpenApi_Call { +func (_c *MockServingEndpointsInterface_GetOpenApi_Call) RunAndReturn(run func(context.Context, serving.GetOpenApiRequest) (*serving.GetOpenApiResponse, error)) *MockServingEndpointsInterface_GetOpenApi_Call { _c.Call.Return(run) return _c } // GetOpenApiByName provides a mock function with given fields: ctx, name -func (_m *MockServingEndpointsInterface) GetOpenApiByName(ctx context.Context, name string) error { +func (_m *MockServingEndpointsInterface) GetOpenApiByName(ctx context.Context, name string) (*serving.GetOpenApiResponse, error) { ret := _m.Called(ctx, name) if len(ret) == 0 { panic("no return value specified for GetOpenApiByName") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + var r0 *serving.GetOpenApiResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*serving.GetOpenApiResponse, error)); ok { + return rf(ctx, name) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *serving.GetOpenApiResponse); ok { r0 = rf(ctx, name) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*serving.GetOpenApiResponse) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // 
MockServingEndpointsInterface_GetOpenApiByName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetOpenApiByName' @@ -694,12 +718,12 @@ func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) Run(run func(ctx return _c } -func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) Return(_a0 error) *MockServingEndpointsInterface_GetOpenApiByName_Call { - _c.Call.Return(_a0) +func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) Return(_a0 *serving.GetOpenApiResponse, _a1 error) *MockServingEndpointsInterface_GetOpenApiByName_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) RunAndReturn(run func(context.Context, string) error) *MockServingEndpointsInterface_GetOpenApiByName_Call { +func (_c *MockServingEndpointsInterface_GetOpenApiByName_Call) RunAndReturn(run func(context.Context, string) (*serving.GetOpenApiResponse, error)) *MockServingEndpointsInterface_GetOpenApiByName_Call { _c.Call.Return(run) return _c } @@ -940,6 +964,65 @@ func (_c *MockServingEndpointsInterface_GetPermissionsByServingEndpointId_Call) return _c } +// HttpRequest provides a mock function with given fields: ctx, request +func (_m *MockServingEndpointsInterface) HttpRequest(ctx context.Context, request serving.ExternalFunctionRequest) (*serving.ExternalFunctionResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for HttpRequest") + } + + var r0 *serving.ExternalFunctionResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, serving.ExternalFunctionRequest) (*serving.ExternalFunctionResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, serving.ExternalFunctionRequest) *serving.ExternalFunctionResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*serving.ExternalFunctionResponse) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, serving.ExternalFunctionRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockServingEndpointsInterface_HttpRequest_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'HttpRequest' +type MockServingEndpointsInterface_HttpRequest_Call struct { + *mock.Call +} + +// HttpRequest is a helper method to define mock.On call +// - ctx context.Context +// - request serving.ExternalFunctionRequest +func (_e *MockServingEndpointsInterface_Expecter) HttpRequest(ctx interface{}, request interface{}) *MockServingEndpointsInterface_HttpRequest_Call { + return &MockServingEndpointsInterface_HttpRequest_Call{Call: _e.mock.On("HttpRequest", ctx, request)} +} + +func (_c *MockServingEndpointsInterface_HttpRequest_Call) Run(run func(ctx context.Context, request serving.ExternalFunctionRequest)) *MockServingEndpointsInterface_HttpRequest_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(serving.ExternalFunctionRequest)) + }) + return _c +} + +func (_c *MockServingEndpointsInterface_HttpRequest_Call) Return(_a0 *serving.ExternalFunctionResponse, _a1 error) *MockServingEndpointsInterface_HttpRequest_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockServingEndpointsInterface_HttpRequest_Call) RunAndReturn(run func(context.Context, serving.ExternalFunctionRequest) (*serving.ExternalFunctionResponse, error)) *MockServingEndpointsInterface_HttpRequest_Call { + _c.Call.Return(run) + return _c +} + // List provides a mock function with given fields: ctx func (_m *MockServingEndpointsInterface) List(ctx context.Context) listing.Iterator[serving.ServingEndpoint] { ret := _m.Called(ctx) @@ -1166,23 +1249,23 @@ func (_c *MockServingEndpointsInterface_LogsByNameAndServedModelName_Call) RunAn } // Patch provides a mock function with given fields: ctx, request -func (_m *MockServingEndpointsInterface) Patch(ctx 
context.Context, request serving.PatchServingEndpointTags) ([]serving.EndpointTag, error) { +func (_m *MockServingEndpointsInterface) Patch(ctx context.Context, request serving.PatchServingEndpointTags) (*serving.EndpointTags, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for Patch") } - var r0 []serving.EndpointTag + var r0 *serving.EndpointTags var r1 error - if rf, ok := ret.Get(0).(func(context.Context, serving.PatchServingEndpointTags) ([]serving.EndpointTag, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, serving.PatchServingEndpointTags) (*serving.EndpointTags, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, serving.PatchServingEndpointTags) []serving.EndpointTag); ok { + if rf, ok := ret.Get(0).(func(context.Context, serving.PatchServingEndpointTags) *serving.EndpointTags); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]serving.EndpointTag) + r0 = ret.Get(0).(*serving.EndpointTags) } } @@ -1214,12 +1297,12 @@ func (_c *MockServingEndpointsInterface_Patch_Call) Run(run func(ctx context.Con return _c } -func (_c *MockServingEndpointsInterface_Patch_Call) Return(_a0 []serving.EndpointTag, _a1 error) *MockServingEndpointsInterface_Patch_Call { +func (_c *MockServingEndpointsInterface_Patch_Call) Return(_a0 *serving.EndpointTags, _a1 error) *MockServingEndpointsInterface_Patch_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockServingEndpointsInterface_Patch_Call) RunAndReturn(run func(context.Context, serving.PatchServingEndpointTags) ([]serving.EndpointTag, error)) *MockServingEndpointsInterface_Patch_Call { +func (_c *MockServingEndpointsInterface_Patch_Call) RunAndReturn(run func(context.Context, serving.PatchServingEndpointTags) (*serving.EndpointTags, error)) *MockServingEndpointsInterface_Patch_Call { _c.Call.Return(run) return _c } diff --git a/experimental/mocks/service/sharing/mock_recipients_interface.go 
b/experimental/mocks/service/sharing/mock_recipients_interface.go index 7f724bb2d..9a4a4dec4 100644 --- a/experimental/mocks/service/sharing/mock_recipients_interface.go +++ b/experimental/mocks/service/sharing/mock_recipients_interface.go @@ -403,65 +403,6 @@ func (_c *MockRecipientsInterface_ListAll_Call) RunAndReturn(run func(context.Co return _c } -// RecipientInfoNameToMetastoreIdMap provides a mock function with given fields: ctx, request -func (_m *MockRecipientsInterface) RecipientInfoNameToMetastoreIdMap(ctx context.Context, request sharing.ListRecipientsRequest) (map[string]string, error) { - ret := _m.Called(ctx, request) - - if len(ret) == 0 { - panic("no return value specified for RecipientInfoNameToMetastoreIdMap") - } - - var r0 map[string]string - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, sharing.ListRecipientsRequest) (map[string]string, error)); ok { - return rf(ctx, request) - } - if rf, ok := ret.Get(0).(func(context.Context, sharing.ListRecipientsRequest) map[string]string); ok { - r0 = rf(ctx, request) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, sharing.ListRecipientsRequest) error); ok { - r1 = rf(ctx, request) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'RecipientInfoNameToMetastoreIdMap' -type MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call struct { - *mock.Call -} - -// RecipientInfoNameToMetastoreIdMap is a helper method to define mock.On call -// - ctx context.Context -// - request sharing.ListRecipientsRequest -func (_e *MockRecipientsInterface_Expecter) RecipientInfoNameToMetastoreIdMap(ctx interface{}, request interface{}) *MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call { - return 
&MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call{Call: _e.mock.On("RecipientInfoNameToMetastoreIdMap", ctx, request)} -} - -func (_c *MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call) Run(run func(ctx context.Context, request sharing.ListRecipientsRequest)) *MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(sharing.ListRecipientsRequest)) - }) - return _c -} - -func (_c *MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call) Return(_a0 map[string]string, _a1 error) *MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call) RunAndReturn(run func(context.Context, sharing.ListRecipientsRequest) (map[string]string, error)) *MockRecipientsInterface_RecipientInfoNameToMetastoreIdMap_Call { - _c.Call.Return(run) - return _c -} - // RotateToken provides a mock function with given fields: ctx, request func (_m *MockRecipientsInterface) RotateToken(ctx context.Context, request sharing.RotateRecipientToken) (*sharing.RecipientInfo, error) { ret := _m.Called(ctx, request) @@ -640,21 +581,33 @@ func (_c *MockRecipientsInterface_SharePermissionsByName_Call) RunAndReturn(run } // Update provides a mock function with given fields: ctx, request -func (_m *MockRecipientsInterface) Update(ctx context.Context, request sharing.UpdateRecipient) error { +func (_m *MockRecipientsInterface) Update(ctx context.Context, request sharing.UpdateRecipient) (*sharing.RecipientInfo, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for Update") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, sharing.UpdateRecipient) error); ok { + var r0 *sharing.RecipientInfo + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sharing.UpdateRecipient) (*sharing.RecipientInfo, error)); 
ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sharing.UpdateRecipient) *sharing.RecipientInfo); ok { r0 = rf(ctx, request) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sharing.RecipientInfo) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, sharing.UpdateRecipient) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // MockRecipientsInterface_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' @@ -676,12 +629,12 @@ func (_c *MockRecipientsInterface_Update_Call) Run(run func(ctx context.Context, return _c } -func (_c *MockRecipientsInterface_Update_Call) Return(_a0 error) *MockRecipientsInterface_Update_Call { - _c.Call.Return(_a0) +func (_c *MockRecipientsInterface_Update_Call) Return(_a0 *sharing.RecipientInfo, _a1 error) *MockRecipientsInterface_Update_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *MockRecipientsInterface_Update_Call) RunAndReturn(run func(context.Context, sharing.UpdateRecipient) error) *MockRecipientsInterface_Update_Call { +func (_c *MockRecipientsInterface_Update_Call) RunAndReturn(run func(context.Context, sharing.UpdateRecipient) (*sharing.RecipientInfo, error)) *MockRecipientsInterface_Update_Call { _c.Call.Return(run) return _c } diff --git a/internal/sharing_test.go b/internal/sharing_test.go index 93cfa2425..50c91ab08 100644 --- a/internal/sharing_test.go +++ b/internal/sharing_test.go @@ -104,7 +104,7 @@ func TestUcAccRecipients(t *testing.T) { err := w.Recipients.DeleteByName(ctx, created.Name) require.NoError(t, err) }) - err = w.Recipients.Update(ctx, sharing.UpdateRecipient{ + _, err = w.Recipients.Update(ctx, sharing.UpdateRecipient{ Name: created.Name, Comment: RandomName("comment "), }) diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index 61b7c4368..cf4a225a5 100755 --- a/service/cleanrooms/model.go +++ 
b/service/cleanrooms/model.go @@ -196,6 +196,12 @@ type CleanRoomAssetNotebook struct { // Base 64 representation of the notebook contents. This is the same format // as returned by :method:workspace/export with the format of **HTML**. NotebookContent string `json:"notebook_content,omitempty"` + // top-level status derived from all reviews + ReviewState CleanRoomNotebookReviewNotebookReviewState `json:"review_state,omitempty"` + // All existing approvals or rejections + Reviews []CleanRoomNotebookReview `json:"reviews,omitempty"` + // collaborators that can run the notebook + RunnerCollaborators []CleanRoomCollaborator `json:"runner_collaborators,omitempty"` ForceSendFields []string `json:"-"` } @@ -212,6 +218,8 @@ type CleanRoomAssetStatusEnum string const CleanRoomAssetStatusEnumActive CleanRoomAssetStatusEnum = `ACTIVE` +const CleanRoomAssetStatusEnumPending CleanRoomAssetStatusEnum = `PENDING` + const CleanRoomAssetStatusEnumPermissionDenied CleanRoomAssetStatusEnum = `PERMISSION_DENIED` // String representation for [fmt.Print] @@ -222,11 +230,11 @@ func (f *CleanRoomAssetStatusEnum) String() string { // Set raw string value and validate it against allowed values func (f *CleanRoomAssetStatusEnum) Set(v string) error { switch v { - case `ACTIVE`, `PERMISSION_DENIED`: + case `ACTIVE`, `PENDING`, `PERMISSION_DENIED`: *f = CleanRoomAssetStatusEnum(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ACTIVE", "PERMISSION_DENIED"`, v) + return fmt.Errorf(`value "%s" is not one of "ACTIVE", "PENDING", "PERMISSION_DENIED"`, v) } } @@ -304,7 +312,7 @@ type CleanRoomCollaborator struct { // requirements]. // // [UC securable naming requirements]: https://docs.databricks.com/en/data-governance/unity-catalog/index.html#securable-object-naming-requirements - CollaboratorAlias string `json:"collaborator_alias,omitempty"` + CollaboratorAlias string `json:"collaborator_alias"` // Generated display name for the collaborator. 
In the case of a single // metastore clean room, it is the clean room name. For x-metastore clean // rooms, it is the organization name of the metastore. It is not restricted @@ -338,6 +346,56 @@ func (s CleanRoomCollaborator) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type CleanRoomNotebookReview struct { + // review comment + Comment string `json:"comment,omitempty"` + // timestamp of when the review was submitted + CreatedAtMillis int64 `json:"created_at_millis,omitempty"` + // review outcome + ReviewState CleanRoomNotebookReviewNotebookReviewState `json:"review_state,omitempty"` + // collaborator alias of the reviewer + ReviewerCollaboratorAlias string `json:"reviewer_collaborator_alias,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CleanRoomNotebookReview) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CleanRoomNotebookReview) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CleanRoomNotebookReviewNotebookReviewState string + +const CleanRoomNotebookReviewNotebookReviewStateApproved CleanRoomNotebookReviewNotebookReviewState = `APPROVED` + +const CleanRoomNotebookReviewNotebookReviewStatePending CleanRoomNotebookReviewNotebookReviewState = `PENDING` + +const CleanRoomNotebookReviewNotebookReviewStateRejected CleanRoomNotebookReviewNotebookReviewState = `REJECTED` + +// String representation for [fmt.Print] +func (f *CleanRoomNotebookReviewNotebookReviewState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *CleanRoomNotebookReviewNotebookReviewState) Set(v string) error { + switch v { + case `APPROVED`, `PENDING`, `REJECTED`: + *f = CleanRoomNotebookReviewNotebookReviewState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "APPROVED", "PENDING", "REJECTED"`, v) + } +} + +// Type always returns CleanRoomNotebookReviewNotebookReviewState to satisfy [pflag.Value] interface +func (f 
*CleanRoomNotebookReviewNotebookReviewState) Type() string { + return "CleanRoomNotebookReviewNotebookReviewState" +} + // Stores information about a single task run. type CleanRoomNotebookTaskRun struct { // Job run info of the task in the runner's local workspace. This field is diff --git a/service/compute/model.go b/service/compute/model.go index 6414a21eb..5fe81ced7 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2732,6 +2732,14 @@ func (f *EventDetailsCause) Type() string { type EventType string +const EventTypeAddNodesFailed EventType = `ADD_NODES_FAILED` + +const EventTypeAutomaticClusterUpdate EventType = `AUTOMATIC_CLUSTER_UPDATE` + +const EventTypeAutoscalingBackoff EventType = `AUTOSCALING_BACKOFF` + +const EventTypeAutoscalingFailed EventType = `AUTOSCALING_FAILED` + const EventTypeAutoscalingStatsReport EventType = `AUTOSCALING_STATS_REPORT` const EventTypeCreating EventType = `CREATING` @@ -2790,11 +2798,11 @@ func (f *EventType) String() string { // Set raw string value and validate it against allowed values func (f *EventType) Set(v string) error { switch v { - case `AUTOSCALING_STATS_REPORT`, `CREATING`, `DBFS_DOWN`, `DID_NOT_EXPAND_DISK`, `DRIVER_HEALTHY`, `DRIVER_NOT_RESPONDING`, `DRIVER_UNAVAILABLE`, `EDITED`, `EXPANDED_DISK`, `FAILED_TO_EXPAND_DISK`, `INIT_SCRIPTS_FINISHED`, `INIT_SCRIPTS_STARTED`, `METASTORE_DOWN`, `NODES_LOST`, `NODE_BLACKLISTED`, `NODE_EXCLUDED_DECOMMISSIONED`, `PINNED`, `RESIZING`, `RESTARTING`, `RUNNING`, `SPARK_EXCEPTION`, `STARTING`, `TERMINATING`, `UNPINNED`, `UPSIZE_COMPLETED`: + case `ADD_NODES_FAILED`, `AUTOMATIC_CLUSTER_UPDATE`, `AUTOSCALING_BACKOFF`, `AUTOSCALING_FAILED`, `AUTOSCALING_STATS_REPORT`, `CREATING`, `DBFS_DOWN`, `DID_NOT_EXPAND_DISK`, `DRIVER_HEALTHY`, `DRIVER_NOT_RESPONDING`, `DRIVER_UNAVAILABLE`, `EDITED`, `EXPANDED_DISK`, `FAILED_TO_EXPAND_DISK`, `INIT_SCRIPTS_FINISHED`, `INIT_SCRIPTS_STARTED`, `METASTORE_DOWN`, `NODES_LOST`, `NODE_BLACKLISTED`, 
`NODE_EXCLUDED_DECOMMISSIONED`, `PINNED`, `RESIZING`, `RESTARTING`, `RUNNING`, `SPARK_EXCEPTION`, `STARTING`, `TERMINATING`, `UNPINNED`, `UPSIZE_COMPLETED`: *f = EventType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "AUTOSCALING_STATS_REPORT", "CREATING", "DBFS_DOWN", "DID_NOT_EXPAND_DISK", "DRIVER_HEALTHY", "DRIVER_NOT_RESPONDING", "DRIVER_UNAVAILABLE", "EDITED", "EXPANDED_DISK", "FAILED_TO_EXPAND_DISK", "INIT_SCRIPTS_FINISHED", "INIT_SCRIPTS_STARTED", "METASTORE_DOWN", "NODES_LOST", "NODE_BLACKLISTED", "NODE_EXCLUDED_DECOMMISSIONED", "PINNED", "RESIZING", "RESTARTING", "RUNNING", "SPARK_EXCEPTION", "STARTING", "TERMINATING", "UNPINNED", "UPSIZE_COMPLETED"`, v) + return fmt.Errorf(`value "%s" is not one of "ADD_NODES_FAILED", "AUTOMATIC_CLUSTER_UPDATE", "AUTOSCALING_BACKOFF", "AUTOSCALING_FAILED", "AUTOSCALING_STATS_REPORT", "CREATING", "DBFS_DOWN", "DID_NOT_EXPAND_DISK", "DRIVER_HEALTHY", "DRIVER_NOT_RESPONDING", "DRIVER_UNAVAILABLE", "EDITED", "EXPANDED_DISK", "FAILED_TO_EXPAND_DISK", "INIT_SCRIPTS_FINISHED", "INIT_SCRIPTS_STARTED", "METASTORE_DOWN", "NODES_LOST", "NODE_BLACKLISTED", "NODE_EXCLUDED_DECOMMISSIONED", "PINNED", "RESIZING", "RESTARTING", "RUNNING", "SPARK_EXCEPTION", "STARTING", "TERMINATING", "UNPINNED", "UPSIZE_COMPLETED"`, v) } } diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 310008e61..495e6b8db 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -324,8 +324,10 @@ type GenieMessage struct { // MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching // metadata from the data sources. * `FILTERING_CONTEXT`: Running smart // context step to determine relevant context. * `ASKING_AI`: Waiting for - // the LLM to respond to the users question. * `EXECUTING_QUERY`: Executing - // AI provided SQL query. Get the SQL query result by calling + // the LLM to respond to the users question. 
* `PENDING_WAREHOUSE`: Waiting + // for warehouse before the SQL query can start executing. * + // `EXECUTING_QUERY`: Executing AI provided SQL query. Get the SQL query + // result by calling // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. // **Important: The message status will stay in the `EXECUTING_QUERY` until // a client calls @@ -672,8 +674,9 @@ func (f *MessageErrorType) Type() string { // MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching // metadata from the data sources. * `FILTERING_CONTEXT`: Running smart context // step to determine relevant context. * `ASKING_AI`: Waiting for the LLM to -// respond to the users question. * `EXECUTING_QUERY`: Executing AI provided SQL -// query. Get the SQL query result by calling +// respond to the users question. * `PENDING_WAREHOUSE`: Waiting for warehouse +// before the SQL query can start executing. * `EXECUTING_QUERY`: Executing AI +// provided SQL query. Get the SQL query result by calling // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. // **Important: The message status will stay in the `EXECUTING_QUERY` until a // client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**. @@ -713,6 +716,9 @@ const MessageStatusFetchingMetadata MessageStatus = `FETCHING_METADATA` // Running smart context step to determine relevant context. const MessageStatusFilteringContext MessageStatus = `FILTERING_CONTEXT` +// Waiting for warehouse before the SQL query can start executing. +const MessageStatusPendingWarehouse MessageStatus = `PENDING_WAREHOUSE` + // SQL result is not available anymore. The user needs to execute the query // again. 
const MessageStatusQueryResultExpired MessageStatus = `QUERY_RESULT_EXPIRED` @@ -728,11 +734,11 @@ func (f *MessageStatus) String() string { // Set raw string value and validate it against allowed values func (f *MessageStatus) Set(v string) error { switch v { - case `ASKING_AI`, `CANCELLED`, `COMPLETED`, `EXECUTING_QUERY`, `FAILED`, `FETCHING_METADATA`, `FILTERING_CONTEXT`, `QUERY_RESULT_EXPIRED`, `SUBMITTED`: + case `ASKING_AI`, `CANCELLED`, `COMPLETED`, `EXECUTING_QUERY`, `FAILED`, `FETCHING_METADATA`, `FILTERING_CONTEXT`, `PENDING_WAREHOUSE`, `QUERY_RESULT_EXPIRED`, `SUBMITTED`: *f = MessageStatus(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ASKING_AI", "CANCELLED", "COMPLETED", "EXECUTING_QUERY", "FAILED", "FETCHING_METADATA", "FILTERING_CONTEXT", "QUERY_RESULT_EXPIRED", "SUBMITTED"`, v) + return fmt.Errorf(`value "%s" is not one of "ASKING_AI", "CANCELLED", "COMPLETED", "EXECUTING_QUERY", "FAILED", "FETCHING_METADATA", "FILTERING_CONTEXT", "PENDING_WAREHOUSE", "QUERY_RESULT_EXPIRED", "SUBMITTED"`, v) } } diff --git a/service/files/api.go b/service/files/api.go index c12a519dc..b57bd759f 100755 --- a/service/files/api.go +++ b/service/files/api.go @@ -449,10 +449,15 @@ func NewFiles(client *client.DatabricksClient) *FilesAPI { // // The Files API has two distinct endpoints, one for working with files // (`/fs/files`) and another one for working with directories -// (`/fs/directories`). Both endpoints, use the standard HTTP methods GET, HEAD, +// (`/fs/directories`). Both endpoints use the standard HTTP methods GET, HEAD, // PUT, and DELETE to manage files and directories specified using their URI // path. The path is always absolute. // +// Some Files API client features are currently experimental. To enable them, +// set `enable_experimental_files_api_client = True` in your configuration +// profile or use the environment variable +// `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`. 
+// // [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html type FilesAPI struct { filesImpl diff --git a/service/files/interface.go b/service/files/interface.go index 5a573f9f3..c67524d4d 100755 --- a/service/files/interface.go +++ b/service/files/interface.go @@ -147,10 +147,15 @@ type DbfsService interface { // // The Files API has two distinct endpoints, one for working with files // (`/fs/files`) and another one for working with directories -// (`/fs/directories`). Both endpoints, use the standard HTTP methods GET, HEAD, +// (`/fs/directories`). Both endpoints use the standard HTTP methods GET, HEAD, // PUT, and DELETE to manage files and directories specified using their URI // path. The path is always absolute. // +// Some Files API client features are currently experimental. To enable them, +// set `enable_experimental_files_api_client = True` in your configuration +// profile or use the environment variable +// `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`. +// // [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html type FilesService interface { diff --git a/service/iam/api.go b/service/iam/api.go index 388b0a2f9..008607f65 100755 --- a/service/iam/api.go +++ b/service/iam/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Account Access Control, Account Access Control Proxy, Account Groups, Account Service Principals, Account Users, Current User, Groups, Permission Migration, Permissions, Service Principals, Users, Workspace Assignment, etc. +// These APIs allow you to manage Access Control, Account Access Control, Account Access Control Proxy, Account Groups, Account Service Principals, Account Users, Current User, Groups, Permission Migration, Permissions, Service Principals, Users, Workspace Assignment, etc. 
package iam import ( @@ -12,6 +12,25 @@ import ( "github.com/databricks/databricks-sdk-go/useragent" ) +type AccessControlInterface interface { + + // Check access policy to a resource. + CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error) +} + +func NewAccessControl(client *client.DatabricksClient) *AccessControlAPI { + return &AccessControlAPI{ + accessControlImpl: accessControlImpl{ + client: client, + }, + } +} + +// Rule based Access Control for Databricks Resources. +type AccessControlAPI struct { + accessControlImpl +} + type AccountAccessControlInterface interface { // Get assignable roles for a resource. diff --git a/service/iam/impl.go b/service/iam/impl.go index f41d601c3..8e9f360d3 100755 --- a/service/iam/impl.go +++ b/service/iam/impl.go @@ -10,6 +10,21 @@ import ( "github.com/databricks/databricks-sdk-go/client" ) +// unexported type that holds implementations of just AccessControl API methods +type accessControlImpl struct { + client *client.DatabricksClient +} + +func (a *accessControlImpl) CheckPolicy(ctx context.Context, request CheckPolicyRequest) (*CheckPolicyResponse, error) { + var checkPolicyResponse CheckPolicyResponse + path := "/api/2.0/access-control/check-policy-v2" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &checkPolicyResponse) + return &checkPolicyResponse, err +} + // unexported type that holds implementations of just AccountAccessControl API methods type accountAccessControlImpl struct { client *client.DatabricksClient diff --git a/service/iam/interface.go b/service/iam/interface.go index 0a76288c2..6b44c4503 100755 --- a/service/iam/interface.go +++ b/service/iam/interface.go @@ -6,6 +6,13 @@ import ( "context" ) +// Rule based Access Control for Databricks Resources. 
+// Actor represents an identity trying to access a resource - a user or a
+// service principal. A group can be a principal of a permission set assignment,
+// but an actor is always a user or a service principal.
+// See one pager for more information: http://go/acl/service-identity
+ LegacyAclPath string `json:"legacy_acl_path,omitempty" url:"legacy_acl_path,omitempty"` + // Parent resource info for the current resource. The parent may have + // another parent. + ParentResourceInfo *ResourceInfo `json:"parent_resource_info,omitempty" url:"parent_resource_info,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ResourceInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ResourceInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type ResourceMeta struct { // Identifier for group type. Can be local workspace group // (`WorkspaceGroup`) or account group (`Group`). diff --git a/service/jobs/model.go b/service/jobs/model.go index e23efe94c..c94c00d51 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -240,6 +240,8 @@ const CleanRoomTaskRunLifeCycleStateQueued CleanRoomTaskRunLifeCycleState = `QUE const CleanRoomTaskRunLifeCycleStateRunning CleanRoomTaskRunLifeCycleState = `RUNNING` +const CleanRoomTaskRunLifeCycleStateRunLifeCycleStateUnspecified CleanRoomTaskRunLifeCycleState = `RUN_LIFE_CYCLE_STATE_UNSPECIFIED` + const CleanRoomTaskRunLifeCycleStateSkipped CleanRoomTaskRunLifeCycleState = `SKIPPED` const CleanRoomTaskRunLifeCycleStateTerminated CleanRoomTaskRunLifeCycleState = `TERMINATED` @@ -256,11 +258,11 @@ func (f *CleanRoomTaskRunLifeCycleState) String() string { // Set raw string value and validate it against allowed values func (f *CleanRoomTaskRunLifeCycleState) Set(v string) error { switch v { - case `BLOCKED`, `INTERNAL_ERROR`, `PENDING`, `QUEUED`, `RUNNING`, `SKIPPED`, `TERMINATED`, `TERMINATING`, `WAITING_FOR_RETRY`: + case `BLOCKED`, `INTERNAL_ERROR`, `PENDING`, `QUEUED`, `RUNNING`, `RUN_LIFE_CYCLE_STATE_UNSPECIFIED`, `SKIPPED`, `TERMINATED`, `TERMINATING`, `WAITING_FOR_RETRY`: *f = CleanRoomTaskRunLifeCycleState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCKED", "INTERNAL_ERROR", "PENDING", "QUEUED", "RUNNING", "SKIPPED", 
"TERMINATED", "TERMINATING", "WAITING_FOR_RETRY"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "INTERNAL_ERROR", "PENDING", "QUEUED", "RUNNING", "RUN_LIFE_CYCLE_STATE_UNSPECIFIED", "SKIPPED", "TERMINATED", "TERMINATING", "WAITING_FOR_RETRY"`, v) } } @@ -285,6 +287,8 @@ const CleanRoomTaskRunResultStateFailed CleanRoomTaskRunResultState = `FAILED` const CleanRoomTaskRunResultStateMaximumConcurrentRunsReached CleanRoomTaskRunResultState = `MAXIMUM_CONCURRENT_RUNS_REACHED` +const CleanRoomTaskRunResultStateRunResultStateUnspecified CleanRoomTaskRunResultState = `RUN_RESULT_STATE_UNSPECIFIED` + const CleanRoomTaskRunResultStateSuccess CleanRoomTaskRunResultState = `SUCCESS` const CleanRoomTaskRunResultStateSuccessWithFailures CleanRoomTaskRunResultState = `SUCCESS_WITH_FAILURES` @@ -305,11 +309,11 @@ func (f *CleanRoomTaskRunResultState) String() string { // Set raw string value and validate it against allowed values func (f *CleanRoomTaskRunResultState) Set(v string) error { switch v { - case `CANCELED`, `DISABLED`, `EVICTED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_EVICTED`, `UPSTREAM_FAILED`: + case `CANCELED`, `DISABLED`, `EVICTED`, `EXCLUDED`, `FAILED`, `MAXIMUM_CONCURRENT_RUNS_REACHED`, `RUN_RESULT_STATE_UNSPECIFIED`, `SUCCESS`, `SUCCESS_WITH_FAILURES`, `TIMEDOUT`, `UPSTREAM_CANCELED`, `UPSTREAM_EVICTED`, `UPSTREAM_FAILED`: *f = CleanRoomTaskRunResultState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CANCELED", "DISABLED", "EVICTED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", "UPSTREAM_CANCELED", "UPSTREAM_EVICTED", "UPSTREAM_FAILED"`, v) + return fmt.Errorf(`value "%s" is not one of "CANCELED", "DISABLED", "EVICTED", "EXCLUDED", "FAILED", "MAXIMUM_CONCURRENT_RUNS_REACHED", "RUN_RESULT_STATE_UNSPECIFIED", "SUCCESS", "SUCCESS_WITH_FAILURES", "TIMEDOUT", 
"UPSTREAM_CANCELED", "UPSTREAM_EVICTED", "UPSTREAM_FAILED"`, v) } } @@ -351,6 +355,15 @@ func (s CleanRoomsNotebookTask) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput struct { + // The run state of the clean rooms notebook task. + CleanRoomJobRunState *CleanRoomTaskRunState `json:"clean_room_job_run_state,omitempty"` + // The notebook output for the clean room run + NotebookOutput *NotebookOutput `json:"notebook_output,omitempty"` + // Information on how to access the output schema for the clean room run + OutputSchemaInfo *OutputSchemaInfo `json:"output_schema_info,omitempty"` +} + type ClusterInstance struct { // The canonical identifier for the cluster used by a run. This field is // always available for runs on existing clusters. For runs on new clusters, @@ -2110,6 +2123,27 @@ func (s NotebookTask) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Stores the catalog name, schema name, and the output schema expiration time +// for the clean room run. +type OutputSchemaInfo struct { + CatalogName string `json:"catalog_name,omitempty"` + // The expiration time for the output schema as a Unix timestamp in + // milliseconds. + ExpirationTime int64 `json:"expiration_time,omitempty"` + + SchemaName string `json:"schema_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *OutputSchemaInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s OutputSchemaInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type PauseStatus string const PauseStatusPaused PauseStatus = `PAUSED` @@ -3175,6 +3209,8 @@ func (s RunNowResponse) MarshalJSON() ([]byte, error) { // Run output was retrieved successfully. 
type RunOutput struct { + // The output of a clean rooms notebook task, if available + CleanRoomsNotebookOutput *CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput `json:"clean_rooms_notebook_output,omitempty"` // The output of a dbt task, if available. DbtOutput *DbtOutput `json:"dbt_output,omitempty"` // An error message indicating why a task failed or why output is not @@ -3690,6 +3726,8 @@ type SparkJarTask struct { // // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables Parameters []string `json:"parameters,omitempty"` + // Deprecated. A value of `false` is no longer supported. + RunAsRepl bool `json:"run_as_repl,omitempty"` ForceSendFields []string `json:"-"` } diff --git a/service/oauth2/model.go b/service/oauth2/model.go index a448d1690..80e7d7255 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -649,6 +649,10 @@ type UpdateCustomAppIntegration struct { // List of OAuth redirect urls to be updated in the custom OAuth app // integration RedirectUrls []string `json:"redirect_urls,omitempty"` + // List of OAuth scopes to be updated in the custom OAuth app integration, + // similar to redirect URIs this will fully replace the existing values + // instead of appending + Scopes []string `json:"scopes,omitempty"` // Token access policy to be updated in the custom OAuth app integration TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` } diff --git a/service/pkg.go b/service/pkg.go index bd49b7d6b..a1e015563 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -1,5 +1,7 @@ // Databricks SDK for Go APIs // +// - [iam.AccessControlAPI]: Rule based Access Control for Databricks Resources. +// // - [iam.AccountAccessControlAPI]: These APIs manage access rules on resources in an account. // // - [iam.AccountAccessControlProxyAPI]: These APIs manage access rules on resources in an account. 
@@ -299,6 +301,7 @@ import ( // https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service // See: https://pkg.go.dev/golang.org/x/tools/internal/imports#ImportPathToAssumedName var ( + _ *iam.AccessControlAPI = nil _ *iam.AccountAccessControlAPI = nil _ *iam.AccountAccessControlProxyAPI = nil _ *settings.AibiDashboardEmbeddingAccessPolicyAPI = nil diff --git a/service/serving/api.go b/service/serving/api.go index 5a2606d6b..3e7182827 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -74,14 +74,14 @@ type ServingEndpointsInterface interface { // Get the query schema of the serving endpoint in OpenAPI format. The schema // contains information for the supported paths, input and output format and // datatypes. - GetOpenApi(ctx context.Context, request GetOpenApiRequest) error + GetOpenApi(ctx context.Context, request GetOpenApiRequest) (*GetOpenApiResponse, error) // Get the schema for a serving endpoint. // // Get the query schema of the serving endpoint in OpenAPI format. The schema // contains information for the supported paths, input and output format and // datatypes. - GetOpenApiByName(ctx context.Context, name string) error + GetOpenApiByName(ctx context.Context, name string) (*GetOpenApiResponse, error) // Get serving endpoint permission levels. // @@ -105,6 +105,9 @@ type ServingEndpointsInterface interface { // permissions from their root object. GetPermissionsByServingEndpointId(ctx context.Context, servingEndpointId string) (*ServingEndpointPermissions, error) + // Make external services call using the credentials stored in UC Connection. + HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*ExternalFunctionResponse, error) + // Get all serving endpoints. // // This method is generated by Databricks SDK Code Generator. @@ -129,7 +132,7 @@ type ServingEndpointsInterface interface { // // Used to batch add and delete tags from a serving endpoint with a single API // call. 
- Patch(ctx context.Context, request PatchServingEndpointTags) ([]EndpointTag, error) + Patch(ctx context.Context, request PatchServingEndpointTags) (*EndpointTags, error) // Update rate limits of a serving endpoint. // @@ -141,7 +144,7 @@ type ServingEndpointsInterface interface { // Update AI Gateway of a serving endpoint. // // Used to update the AI Gateway of a serving endpoint. NOTE: Only external - // model endpoints are currently supported. + // model and provisioned throughput endpoints are currently supported. PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) // Query a serving endpoint. @@ -341,7 +344,7 @@ func (a *ServingEndpointsAPI) GetByName(ctx context.Context, name string) (*Serv // Get the query schema of the serving endpoint in OpenAPI format. The schema // contains information for the supported paths, input and output format and // datatypes. -func (a *ServingEndpointsAPI) GetOpenApiByName(ctx context.Context, name string) error { +func (a *ServingEndpointsAPI) GetOpenApiByName(ctx context.Context, name string) (*GetOpenApiResponse, error) { return a.servingEndpointsImpl.GetOpenApi(ctx, GetOpenApiRequest{ Name: name, }) diff --git a/service/serving/impl.go b/service/serving/impl.go index d31d545f8..220d1f3b4 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -69,14 +69,14 @@ func (a *servingEndpointsImpl) Get(ctx context.Context, request GetServingEndpoi return &servingEndpointDetailed, err } -func (a *servingEndpointsImpl) GetOpenApi(ctx context.Context, request GetOpenApiRequest) error { +func (a *servingEndpointsImpl) GetOpenApi(ctx context.Context, request GetOpenApiRequest) (*GetOpenApiResponse, error) { var getOpenApiResponse GetOpenApiResponse path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/openapi", request.Name) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" + headers["Accept"] = "text/plain" err := 
a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getOpenApiResponse) - return err + return &getOpenApiResponse, err } func (a *servingEndpointsImpl) GetPermissionLevels(ctx context.Context, request GetServingEndpointPermissionLevelsRequest) (*GetServingEndpointPermissionLevelsResponse, error) { @@ -99,6 +99,17 @@ func (a *servingEndpointsImpl) GetPermissions(ctx context.Context, request GetSe return &servingEndpointPermissions, err } +func (a *servingEndpointsImpl) HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*ExternalFunctionResponse, error) { + var externalFunctionResponse ExternalFunctionResponse + path := "/api/2.0/external-function" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &externalFunctionResponse) + return &externalFunctionResponse, err +} + func (a *servingEndpointsImpl) List(ctx context.Context) (*ListEndpointsResponse, error) { var listEndpointsResponse ListEndpointsResponse path := "/api/2.0/serving-endpoints" @@ -119,15 +130,15 @@ func (a *servingEndpointsImpl) Logs(ctx context.Context, request LogsRequest) (* return &serverLogsResponse, err } -func (a *servingEndpointsImpl) Patch(ctx context.Context, request PatchServingEndpointTags) ([]EndpointTag, error) { - var endpointTagList []EndpointTag +func (a *servingEndpointsImpl) Patch(ctx context.Context, request PatchServingEndpointTags) (*EndpointTags, error) { + var endpointTags EndpointTags path := fmt.Sprintf("/api/2.0/serving-endpoints/%v/tags", request.Name) queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &endpointTagList) - return endpointTagList, err + err := 
a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &endpointTags) + return &endpointTags, err } func (a *servingEndpointsImpl) Put(ctx context.Context, request PutRequest) (*PutResponse, error) { diff --git a/service/serving/interface.go b/service/serving/interface.go index 0a38190a9..806e0f49f 100755 --- a/service/serving/interface.go +++ b/service/serving/interface.go @@ -48,7 +48,7 @@ type ServingEndpointsService interface { // Get the query schema of the serving endpoint in OpenAPI format. The // schema contains information for the supported paths, input and output // format and datatypes. - GetOpenApi(ctx context.Context, request GetOpenApiRequest) error + GetOpenApi(ctx context.Context, request GetOpenApiRequest) (*GetOpenApiResponse, error) // Get serving endpoint permission levels. // @@ -61,6 +61,10 @@ type ServingEndpointsService interface { // permissions from their root object. GetPermissions(ctx context.Context, request GetServingEndpointPermissionsRequest) (*ServingEndpointPermissions, error) + // Make external services call using the credentials stored in UC + // Connection. + HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*ExternalFunctionResponse, error) + // Get all serving endpoints. // // Use ListAll() to get all ServingEndpoint instances @@ -75,7 +79,7 @@ type ServingEndpointsService interface { // // Used to batch add and delete tags from a serving endpoint with a single // API call. - Patch(ctx context.Context, request PatchServingEndpointTags) ([]EndpointTag, error) + Patch(ctx context.Context, request PatchServingEndpointTags) (*EndpointTags, error) // Update rate limits of a serving endpoint. // @@ -87,7 +91,7 @@ type ServingEndpointsService interface { // Update AI Gateway of a serving endpoint. // // Used to update the AI Gateway of a serving endpoint. NOTE: Only external - // model endpoints are currently supported. + // model and provisioned throughput endpoints are currently supported. 
PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) // Query a serving endpoint. diff --git a/service/serving/model.go b/service/serving/model.go index ade652782..6d22239da 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -72,20 +72,10 @@ func (s AiGatewayGuardrailParameters) MarshalJSON() ([]byte, error) { } type AiGatewayGuardrailPiiBehavior struct { - // Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' - // is set for the input guardrail and the request contains PII, the request - // is not sent to the model server and 400 status code is returned; if - // 'BLOCK' is set for the output guardrail and the model response contains - // PII, the PII info in the response is redacted and 400 status code is - // returned. - Behavior AiGatewayGuardrailPiiBehaviorBehavior `json:"behavior"` -} - -// Behavior for PII filter. Currently only 'BLOCK' is supported. If 'BLOCK' is -// set for the input guardrail and the request contains PII, the request is not -// sent to the model server and 400 status code is returned; if 'BLOCK' is set -// for the output guardrail and the model response contains PII, the PII info in -// the response is redacted and 400 status code is returned. + // Configuration for input guardrail filters. + Behavior AiGatewayGuardrailPiiBehaviorBehavior `json:"behavior,omitempty"` +} + type AiGatewayGuardrailPiiBehaviorBehavior string const AiGatewayGuardrailPiiBehaviorBehaviorBlock AiGatewayGuardrailPiiBehaviorBehavior = `BLOCK` @@ -149,7 +139,7 @@ func (s AiGatewayInferenceTableConfig) MarshalJSON() ([]byte, error) { type AiGatewayRateLimit struct { // Used to specify how many calls are allowed for a key within the // renewal_period. - Calls int `json:"calls"` + Calls int64 `json:"calls"` // Key field for a rate limit. Currently, only 'user' and 'endpoint' are // supported, with 'endpoint' being the default if not specified. 
Key AiGatewayRateLimitKey `json:"key,omitempty"` @@ -158,8 +148,6 @@ type AiGatewayRateLimit struct { RenewalPeriod AiGatewayRateLimitRenewalPeriod `json:"renewal_period"` } -// Key field for a rate limit. Currently, only 'user' and 'endpoint' are -// supported, with 'endpoint' being the default if not specified. type AiGatewayRateLimitKey string const AiGatewayRateLimitKeyEndpoint AiGatewayRateLimitKey = `endpoint` @@ -187,7 +175,6 @@ func (f *AiGatewayRateLimitKey) Type() string { return "AiGatewayRateLimitKey" } -// Renewal period field for a rate limit. Currently, only 'minute' is supported. type AiGatewayRateLimitRenewalPeriod string const AiGatewayRateLimitRenewalPeriodMinute AiGatewayRateLimitRenewalPeriod = `minute` @@ -231,9 +218,9 @@ func (s AiGatewayUsageTrackingConfig) MarshalJSON() ([]byte, error) { type AmazonBedrockConfig struct { // The Databricks secret key reference for an AWS access key ID with // permissions to interact with Bedrock services. If you prefer to paste - // your API key directly, see `aws_access_key_id`. You must provide an API - // key using one of the following fields: `aws_access_key_id` or - // `aws_access_key_id_plaintext`. + // your API key directly, see `aws_access_key_id_plaintext`. You must + // provide an API key using one of the following fields: `aws_access_key_id` + // or `aws_access_key_id_plaintext`. AwsAccessKeyId string `json:"aws_access_key_id,omitempty"` // An AWS access key ID with permissions to interact with Bedrock services // provided as a plaintext string. If you prefer to reference your key using @@ -272,8 +259,6 @@ func (s AmazonBedrockConfig) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The underlying provider in Amazon Bedrock. Supported values (case -// insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. 
type AmazonBedrockConfigBedrockProvider string const AmazonBedrockConfigBedrockProviderAi21labs AmazonBedrockConfigBedrockProvider = `ai21labs` @@ -353,15 +338,18 @@ func (s AutoCaptureConfigInput) MarshalJSON() ([]byte, error) { } type AutoCaptureConfigOutput struct { - // The name of the catalog in Unity Catalog. + // The name of the catalog in Unity Catalog. NOTE: On update, you cannot + // change the catalog name if the inference table is already enabled. CatalogName string `json:"catalog_name,omitempty"` // Indicates whether the inference table is enabled. Enabled bool `json:"enabled,omitempty"` - // The name of the schema in Unity Catalog. + // The name of the schema in Unity Catalog. NOTE: On update, you cannot + // change the schema name if the inference table is already enabled. SchemaName string `json:"schema_name,omitempty"` State *AutoCaptureState `json:"state,omitempty"` - // The prefix of the table in Unity Catalog. + // The prefix of the table in Unity Catalog. NOTE: On update, you cannot + // change the prefix name if the inference table is already enabled. TableNamePrefix string `json:"table_name_prefix,omitempty"` ForceSendFields []string `json:"-"` @@ -468,11 +456,12 @@ func (s CohereConfig) MarshalJSON() ([]byte, error) { } type CreateServingEndpoint struct { - // The AI Gateway configuration for the serving endpoint. NOTE: only - // external model endpoints are supported as of now. + // The AI Gateway configuration for the serving endpoint. NOTE: Only + // external model and provisioned throughput endpoints are currently + // supported. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The core config of the serving endpoint. - Config EndpointCoreConfigInput `json:"config"` + Config *EndpointCoreConfigInput `json:"config,omitempty"` // The name of the serving endpoint. This field is required and must be // unique across a Databricks workspace. An endpoint name can consist of // alphanumeric characters, dashes, and underscores. 
@@ -497,6 +486,7 @@ func (s CreateServingEndpoint) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Details necessary to query this object's API through the DataPlane APIs. type DataPlaneInfo struct { // Authorization details as a string. AuthorizationDetails string `json:"authorization_details,omitempty"` @@ -557,7 +547,6 @@ type DeleteResponse struct { // Delete a serving endpoint type DeleteServingEndpointRequest struct { - // The name of the serving endpoint. This field is required. Name string `json:"-" url:"-"` } @@ -607,27 +596,31 @@ func (f *EmbeddingsV1ResponseEmbeddingElementObject) Type() string { type EndpointCoreConfigInput struct { // Configuration for Inference Tables which automatically logs requests and - // responses to Unity Catalog. + // responses to Unity Catalog. Note: this field is deprecated for creating + // new provisioned throughput endpoints, or updating existing provisioned + // throughput endpoints that never have inference table configured; in these + // cases please use AI Gateway to manage inference tables. AutoCaptureConfig *AutoCaptureConfigInput `json:"auto_capture_config,omitempty"` // The name of the serving endpoint to update. This field is required. Name string `json:"-" url:"-"` - // A list of served entities for the endpoint to serve. A serving endpoint - // can have up to 15 served entities. + // The list of served entities under the serving endpoint config. ServedEntities []ServedEntityInput `json:"served_entities,omitempty"` - // (Deprecated, use served_entities instead) A list of served models for the - // endpoint to serve. A serving endpoint can have up to 15 served models. + // (Deprecated, use served_entities instead) The list of served models under + // the serving endpoint config. ServedModels []ServedModelInput `json:"served_models,omitempty"` - // The traffic config defining how invocations to the serving endpoint - // should be routed. 
+ // The traffic configuration associated with the serving endpoint config. TrafficConfig *TrafficConfig `json:"traffic_config,omitempty"` } type EndpointCoreConfigOutput struct { // Configuration for Inference Tables which automatically logs requests and - // responses to Unity Catalog. + // responses to Unity Catalog. Note: this field is deprecated for creating + // new provisioned throughput endpoints, or updating existing provisioned + // throughput endpoints that never have inference table configured; in these + // cases please use AI Gateway to manage inference tables. AutoCaptureConfig *AutoCaptureConfigOutput `json:"auto_capture_config,omitempty"` // The config version that the serving endpoint is currently serving. - ConfigVersion int `json:"config_version,omitempty"` + ConfigVersion int64 `json:"config_version,omitempty"` // The list of served entities under the serving endpoint config. ServedEntities []ServedEntityOutput `json:"served_entities,omitempty"` // (Deprecated, use served_entities instead) The list of served models under @@ -657,7 +650,10 @@ type EndpointCoreConfigSummary struct { type EndpointPendingConfig struct { // Configuration for Inference Tables which automatically logs requests and - // responses to Unity Catalog. + // responses to Unity Catalog. Note: this field is deprecated for creating + // new provisioned throughput endpoints, or updating existing provisioned + // throughput endpoints that never have inference table configured; in these + // cases please use AI Gateway to manage inference tables. AutoCaptureConfig *AutoCaptureConfigOutput `json:"auto_capture_config,omitempty"` // The config version that the serving endpoint is currently serving. ConfigVersion int `json:"config_version,omitempty"` @@ -689,7 +685,7 @@ type EndpointState struct { // pending_config is in progress, if the update failed, or if there is no // update in progress. 
Note that if the endpoint's config_update state value // is IN_PROGRESS, another update can not be made until the update completes - // or fails." + // or fails. ConfigUpdate EndpointStateConfigUpdate `json:"config_update,omitempty"` // The state of an endpoint, indicating whether or not the endpoint is // queryable. An endpoint is READY if all of the served entities in its @@ -698,11 +694,6 @@ type EndpointState struct { Ready EndpointStateReady `json:"ready,omitempty"` } -// The state of an endpoint's config update. This informs the user if the -// pending_config is in progress, if the update failed, or if there is no update -// in progress. Note that if the endpoint's config_update state value is -// IN_PROGRESS, another update can not be made until the update completes or -// fails." type EndpointStateConfigUpdate string const EndpointStateConfigUpdateInProgress EndpointStateConfigUpdate = `IN_PROGRESS` @@ -734,10 +725,6 @@ func (f *EndpointStateConfigUpdate) Type() string { return "EndpointStateConfigUpdate" } -// The state of an endpoint, indicating whether or not the endpoint is -// queryable. An endpoint is READY if all of the served entities in its active -// configuration are ready. If any of the actively served entities are in a -// non-ready state, the endpoint state will be NOT_READY. type EndpointStateReady string const EndpointStateReadyNotReady EndpointStateReady = `NOT_READY` @@ -782,6 +769,10 @@ func (s EndpointTag) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type EndpointTags struct { + Tags []EndpointTag `json:"tags,omitempty"` +} + // Get metrics of a serving endpoint type ExportMetricsRequest struct { // The name of the serving endpoint to retrieve metrics for. This field is @@ -793,6 +784,84 @@ type ExportMetricsResponse struct { Contents io.ReadCloser `json:"-"` } +// Simple Proto message for testing +type ExternalFunctionRequest struct { + // The connection name to use. 
This is required to identify the external + // connection. + ConnectionName string `json:"connection_name"` + // Additional headers for the request. If not provided, only auth headers + // from connections would be passed. + Headers string `json:"headers,omitempty"` + // The JSON payload to send in the request body. + Json string `json:"json,omitempty"` + // The HTTP method to use (e.g., 'GET', 'POST'). + Method ExternalFunctionRequestHttpMethod `json:"method"` + // Query parameters for the request. + Params string `json:"params,omitempty"` + // The relative path for the API endpoint. This is required. + Path string `json:"path"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalFunctionRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalFunctionRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExternalFunctionRequestHttpMethod string + +const ExternalFunctionRequestHttpMethodDelete ExternalFunctionRequestHttpMethod = `DELETE` + +const ExternalFunctionRequestHttpMethodGet ExternalFunctionRequestHttpMethod = `GET` + +const ExternalFunctionRequestHttpMethodPatch ExternalFunctionRequestHttpMethod = `PATCH` + +const ExternalFunctionRequestHttpMethodPost ExternalFunctionRequestHttpMethod = `POST` + +const ExternalFunctionRequestHttpMethodPut ExternalFunctionRequestHttpMethod = `PUT` + +// String representation for [fmt.Print] +func (f *ExternalFunctionRequestHttpMethod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ExternalFunctionRequestHttpMethod) Set(v string) error { + switch v { + case `DELETE`, `GET`, `PATCH`, `POST`, `PUT`: + *f = ExternalFunctionRequestHttpMethod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DELETE", "GET", "PATCH", "POST", "PUT"`, v) + } +} + +// Type always returns ExternalFunctionRequestHttpMethod to satisfy [pflag.Value] interface +func (f 
*ExternalFunctionRequestHttpMethod) Type() string { + return "ExternalFunctionRequestHttpMethod" +} + +type ExternalFunctionResponse struct { + // The HTTP status code of the response + StatusCode int `json:"status_code,omitempty"` + // The content of the response + Text string `json:"text,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExternalFunctionResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalFunctionResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type ExternalModel struct { // AI21Labs Config. Only required if the provider is 'ai21labs'. Ai21labsConfig *Ai21LabsConfig `json:"ai21labs_config,omitempty"` @@ -817,15 +886,12 @@ type ExternalModel struct { // The name of the provider for the external model. Currently, the supported // providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', // 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and - // 'palm'.", + // 'palm'. Provider ExternalModelProvider `json:"provider"` // The task type of the external model. Task string `json:"task"` } -// The name of the provider for the external model. Currently, the supported -// providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', -// 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and 'palm'.", type ExternalModelProvider string const ExternalModelProviderAi21labs ExternalModelProvider = `ai21labs` @@ -884,14 +950,15 @@ func (s ExternalModelUsageElement) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// All fields are not sensitive as they are hard-coded in the system and made +// available to customers. type FoundationModel struct { - // The description of the foundation model. Description string `json:"description,omitempty"` - // The display name of the foundation model. + DisplayName string `json:"display_name,omitempty"` - // The URL to the documentation of the foundation model. 
+ Docs string `json:"docs,omitempty"` - // The name of the foundation model. + Name string `json:"name,omitempty"` ForceSendFields []string `json:"-"` @@ -912,9 +979,8 @@ type GetOpenApiRequest struct { Name string `json:"-" url:"-"` } -// The response is an OpenAPI spec in JSON format that typically includes fields -// like openapi, info, servers and paths, etc. type GetOpenApiResponse struct { + Contents io.ReadCloser `json:"-"` } // Get serving endpoint permission levels @@ -948,7 +1014,8 @@ type GoogleCloudVertexAiConfig struct { // key using one of the following fields: `private_key` or // `private_key_plaintext` // - // [Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys + // [Best practices for managing service account keys]: + // https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys PrivateKey string `json:"private_key,omitempty"` // The private key for the service account which has access to the Google // Cloud Vertex AI Service provided as a plaintext secret. See [Best @@ -957,17 +1024,19 @@ type GoogleCloudVertexAiConfig struct { // API key using one of the following fields: `private_key` or // `private_key_plaintext`. // - // [Best practices for managing service account keys]: https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys + // [Best practices for managing service account keys]: + // https://cloud.google.com/iam/docs/best-practices-for-managing-service-account-keys PrivateKeyPlaintext string `json:"private_key_plaintext,omitempty"` // This is the Google Cloud project id that the service account is // associated with. - ProjectId string `json:"project_id,omitempty"` + ProjectId string `json:"project_id"` // This is the region for the Google Cloud Vertex AI Service. See [supported // regions] for more details. Some models are only available in specific // regions. 
// - // [supported regions]: https://cloud.google.com/vertex-ai/docs/general/locations - Region string `json:"region,omitempty"` + // [supported regions]: + // https://cloud.google.com/vertex-ai/docs/general/locations + Region string `json:"region"` ForceSendFields []string `json:"-"` } @@ -995,11 +1064,14 @@ type LogsRequest struct { ServedModelName string `json:"-" url:"-"` } +// A representation of all DataPlaneInfo for operations that can be done on a +// model through Data Plane APIs. type ModelDataPlaneInfo struct { // Information required to query DataPlane API 'query' endpoint. QueryInfo *DataPlaneInfo `json:"query_info,omitempty"` } +// Configs needed to create an OpenAI model route. type OpenAiConfig struct { // This field is only required for Azure AD OpenAI and is the Microsoft // Entra Client ID. @@ -1098,11 +1170,10 @@ type PatchServingEndpointTags struct { } type PayloadTable struct { - // The name of the payload table. Name string `json:"name,omitempty"` - // The status of the payload table. + Status string `json:"status,omitempty"` - // The status message of the payload table. + StatusMessage string `json:"status_message,omitempty"` ForceSendFields []string `json:"-"` @@ -1116,7 +1187,6 @@ func (s PayloadTable) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Update AI Gateway of a serving endpoint type PutAiGatewayRequest struct { // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. @@ -1142,7 +1212,7 @@ type PutAiGatewayResponse struct { Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"` // Configuration for payload logging using inference tables. Use these // tables to monitor and audit data being sent to and received from model - // APIs and to improve model quality . + // APIs and to improve model quality. 
InferenceTableConfig *AiGatewayInferenceTableConfig `json:"inference_table_config,omitempty"` // Configuration for rate limits which can be set to limit endpoint traffic. RateLimits []AiGatewayRateLimit `json:"rate_limits,omitempty"` @@ -1152,7 +1222,6 @@ type PutAiGatewayResponse struct { UsageTrackingConfig *AiGatewayUsageTrackingConfig `json:"usage_tracking_config,omitempty"` } -// Update rate limits of a serving endpoint type PutRequest struct { // The name of the serving endpoint whose rate limits are being updated. // This field is required. @@ -1303,7 +1372,7 @@ func (f *QueryEndpointResponseObject) Type() string { type RateLimit struct { // Used to specify how many calls are allowed for a key within the // renewal_period. - Calls int `json:"calls"` + Calls int64 `json:"calls"` // Key field for a serving endpoint rate limit. Currently, only 'user' and // 'endpoint' are supported, with 'endpoint' being the default if not // specified. @@ -1313,8 +1382,6 @@ type RateLimit struct { RenewalPeriod RateLimitRenewalPeriod `json:"renewal_period"` } -// Key field for a serving endpoint rate limit. Currently, only 'user' and -// 'endpoint' are supported, with 'endpoint' being the default if not specified. type RateLimitKey string const RateLimitKeyEndpoint RateLimitKey = `endpoint` @@ -1342,8 +1409,6 @@ func (f *RateLimitKey) Type() string { return "RateLimitKey" } -// Renewal period field for a serving endpoint rate limit. Currently, only -// 'minute' is supported. type RateLimitRenewalPeriod string const RateLimitRenewalPeriodMinute RateLimitRenewalPeriod = `minute` @@ -1382,10 +1447,9 @@ type ServedEntityInput struct { // Databricks Model Registry, a model in the Unity Catalog (UC), or a // function of type FEATURE_SPEC in the UC. If it is a UC object, the full // name of the object should be given in the form of - // __catalog_name__.__schema_name__.__model_name__. + // **catalog_name.schema_name.model_name**. 
EntityName string `json:"entity_name,omitempty"` - // The version of the model in Databricks Model Registry to be served or - // empty if the entity is a FEATURE_SPEC. + EntityVersion string `json:"entity_version,omitempty"` // An object containing a set of optional, user-specified environment // variable key-value pairs used for serving this entity. Note: this is an @@ -1414,8 +1478,7 @@ type ServedEntityInput struct { // served entity name can consist of alphanumeric characters, dashes, and // underscores. If not specified for an external model, this field defaults // to external_model.name, with '.' and ':' replaced with '-', and if not - // specified for other entities, it defaults to - // -. + // specified for other entities, it defaults to entity_name-entity_version. Name string `json:"name,omitempty"` // Whether the compute resources for the served entity should scale down to // zero. @@ -1434,8 +1497,8 @@ type ServedEntityInput struct { // available by selecting workload types like GPU_SMALL and others. See the // available [GPU types]. // - // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types - WorkloadType string `json:"workload_type,omitempty"` + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-"` } @@ -1449,18 +1512,16 @@ func (s ServedEntityInput) MarshalJSON() ([]byte, error) { } type ServedEntityOutput struct { - // The creation timestamp of the served entity in Unix time. CreationTimestamp int64 `json:"creation_timestamp,omitempty"` - // The email of the user who created the served entity. + Creator string `json:"creator,omitempty"` - // The name of the entity served. The entity may be a model in the + // The name of the entity to be served. 
The entity may be a model in the // Databricks Model Registry, a model in the Unity Catalog (UC), or a // function of type FEATURE_SPEC in the UC. If it is a UC object, the full - // name of the object is given in the form of - // __catalog_name__.__schema_name__.__model_name__. + // name of the object should be given in the form of + // **catalog_name.schema_name.model_name**. EntityName string `json:"entity_name,omitempty"` - // The version of the served entity in Databricks Model Registry or empty if - // the entity is a FEATURE_SPEC. + EntityVersion string `json:"entity_version,omitempty"` // An object containing a set of optional, user-specified environment // variable key-value pairs used for serving this entity. Note: this is an @@ -1469,15 +1530,17 @@ type ServedEntityOutput struct { // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": // "{{secrets/my_scope2/my_key2}}"}` EnvironmentVars map[string]string `json:"environment_vars,omitempty"` - // The external model that is served. NOTE: Only one of external_model, - // foundation_model, and (entity_name, entity_version, workload_size, - // workload_type, and scale_to_zero_enabled) is returned based on the - // endpoint type. + // The external model to be served. NOTE: Only one of external_model and + // (entity_name, entity_version, workload_size, workload_type, and + // scale_to_zero_enabled) can be specified with the latter set being used + // for custom model serving for a Databricks registered model. For an + // existing endpoint with external_model, it cannot be updated to an + // endpoint without external_model. If the endpoint is created without + // external_model, users cannot update it to add external_model later. The + // task type of all external models within an endpoint must be the same. ExternalModel *ExternalModel `json:"external_model,omitempty"` - // The foundation model that is served. 
NOTE: Only one of foundation_model, - // external_model, and (entity_name, entity_version, workload_size, - // workload_type, and scale_to_zero_enabled) is returned based on the - // endpoint type. + // All fields are not sensitive as they are hard-coded in the system and + // made available to customers. FoundationModel *FoundationModel `json:"foundation_model,omitempty"` // ARN of the instance profile that the served entity uses to access AWS // resources. @@ -1486,12 +1549,16 @@ type ServedEntityOutput struct { MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` // The minimum tokens per second that the endpoint can scale down to. MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` - // The name of the served entity. + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to entity_name-entity_version. Name string `json:"name,omitempty"` // Whether the compute resources for the served entity should scale down to // zero. ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` - // Information corresponding to the state of the served entity. + State *ServedModelState `json:"state,omitempty"` // The workload size of the served entity. The workload size corresponds to // a range of provisioned concurrency that the compute autoscales between. A @@ -1499,7 +1566,7 @@ type ServedEntityOutput struct { // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size will be 0. 
+ // the provisioned concurrency for each workload size is 0. WorkloadSize string `json:"workload_size,omitempty"` // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this @@ -1507,8 +1574,8 @@ type ServedEntityOutput struct { // available by selecting workload types like GPU_SMALL and others. See the // available [GPU types]. // - // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types - WorkloadType string `json:"workload_type,omitempty"` + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-"` } @@ -1522,24 +1589,15 @@ func (s ServedEntityOutput) MarshalJSON() ([]byte, error) { } type ServedEntitySpec struct { - // The name of the entity served. The entity may be a model in the - // Databricks Model Registry, a model in the Unity Catalog (UC), or a - // function of type FEATURE_SPEC in the UC. If it is a UC object, the full - // name of the object is given in the form of - // __catalog_name__.__schema_name__.__model_name__. EntityName string `json:"entity_name,omitempty"` - // The version of the served entity in Databricks Model Registry or empty if - // the entity is a FEATURE_SPEC. + EntityVersion string `json:"entity_version,omitempty"` - // The external model that is served. NOTE: Only one of external_model, - // foundation_model, and (entity_name, entity_version) is returned based on - // the endpoint type. + ExternalModel *ExternalModel `json:"external_model,omitempty"` - // The foundation model that is served. NOTE: Only one of foundation_model, - // external_model, and (entity_name, entity_version) is returned based on - // the endpoint type. 
+ // All fields are not sensitive as they are hard-coded in the system and + // made available to customers. FoundationModel *FoundationModel `json:"foundation_model,omitempty"` - // The name of the served entity. + Name string `json:"name,omitempty"` ForceSendFields []string `json:"-"` @@ -1555,49 +1613,47 @@ func (s ServedEntitySpec) MarshalJSON() ([]byte, error) { type ServedModelInput struct { // An object containing a set of optional, user-specified environment - // variable key-value pairs used for serving this model. Note: this is an - // experimental feature and subject to change. Example model environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": // "{{secrets/my_scope2/my_key2}}"}` EnvironmentVars map[string]string `json:"environment_vars,omitempty"` - // ARN of the instance profile that the served model will use to access AWS + // ARN of the instance profile that the served entity uses to access AWS // resources. InstanceProfileArn string `json:"instance_profile_arn,omitempty"` // The maximum tokens per second that the endpoint can scale up to. MaxProvisionedThroughput int `json:"max_provisioned_throughput,omitempty"` // The minimum tokens per second that the endpoint can scale down to. MinProvisionedThroughput int `json:"min_provisioned_throughput,omitempty"` - // The name of the model in Databricks Model Registry to be served or if the - // model resides in Unity Catalog, the full name of model, in the form of - // __catalog_name__.__schema_name__.__model_name__. + ModelName string `json:"model_name"` - // The version of the model in Databricks Model Registry or Unity Catalog to - // be served. + ModelVersion string `json:"model_version"` - // The name of a served model. It must be unique across an endpoint. 
If not - // specified, this field will default to -. A - // served model name can consist of alphanumeric characters, dashes, and - // underscores. + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to entity_name-entity_version. Name string `json:"name,omitempty"` - // Whether the compute resources for the served model should scale down to + // Whether the compute resources for the served entity should scale down to // zero. ScaleToZeroEnabled bool `json:"scale_to_zero_enabled"` - // The workload size of the served model. The workload size corresponds to a - // range of provisioned concurrency that the compute will autoscale between. - // A single unit of provisioned concurrency can process one request at a - // time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // The workload size of the served entity. The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size will be 0. + // the provisioned concurrency for each workload size is 0. WorkloadSize ServedModelInputWorkloadSize `json:"workload_size,omitempty"` - // The workload type of the served model. The workload type selects which + // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this // parameter is "CPU". 
For deep learning workloads, GPU acceleration is // available by selecting workload types like GPU_SMALL and others. See the // available [GPU types]. // - // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types WorkloadType ServedModelInputWorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-"` @@ -1611,13 +1667,6 @@ func (s ServedModelInput) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The workload size of the served model. The workload size corresponds to a -// range of provisioned concurrency that the compute will autoscale between. A -// single unit of provisioned concurrency can process one request at a time. -// Valid workload sizes are "Small" (4 - 4 provisioned concurrency), "Medium" (8 -// - 16 provisioned concurrency), and "Large" (16 - 64 provisioned concurrency). -// If scale-to-zero is enabled, the lower bound of the provisioned concurrency -// for each workload size will be 0. type ServedModelInputWorkloadSize string const ServedModelInputWorkloadSizeLarge ServedModelInputWorkloadSize = `Large` @@ -1647,13 +1696,6 @@ func (f *ServedModelInputWorkloadSize) Type() string { return "ServedModelInputWorkloadSize" } -// The workload type of the served model. The workload type selects which type -// of compute to use in the endpoint. The default value for this parameter is -// "CPU". For deep learning workloads, GPU acceleration is available by -// selecting workload types like GPU_SMALL and others. See the available [GPU -// types]. 
-// -// [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types type ServedModelInputWorkloadType string const ServedModelInputWorkloadTypeCpu ServedModelInputWorkloadType = `CPU` @@ -1688,49 +1730,50 @@ func (f *ServedModelInputWorkloadType) Type() string { } type ServedModelOutput struct { - // The creation timestamp of the served model in Unix time. CreationTimestamp int64 `json:"creation_timestamp,omitempty"` - // The email of the user who created the served model. + Creator string `json:"creator,omitempty"` // An object containing a set of optional, user-specified environment - // variable key-value pairs used for serving this model. Note: this is an - // experimental feature and subject to change. Example model environment + // variable key-value pairs used for serving this entity. Note: this is an + // experimental feature and subject to change. Example entity environment // variables that refer to Databricks secrets: `{"OPENAI_API_KEY": // "{{secrets/my_scope/my_key}}", "DATABRICKS_TOKEN": // "{{secrets/my_scope2/my_key2}}"}` EnvironmentVars map[string]string `json:"environment_vars,omitempty"` - // ARN of the instance profile that the served model will use to access AWS + // ARN of the instance profile that the served entity uses to access AWS // resources. InstanceProfileArn string `json:"instance_profile_arn,omitempty"` - // The name of the model in Databricks Model Registry or the full name of - // the model in Unity Catalog. + ModelName string `json:"model_name,omitempty"` - // The version of the model in Databricks Model Registry or Unity Catalog to - // be served. + ModelVersion string `json:"model_version,omitempty"` - // The name of the served model. + // The name of a served entity. It must be unique across an endpoint. A + // served entity name can consist of alphanumeric characters, dashes, and + // underscores. 
If not specified for an external model, this field defaults + // to external_model.name, with '.' and ':' replaced with '-', and if not + // specified for other entities, it defaults to entity_name-entity_version. Name string `json:"name,omitempty"` - // Whether the compute resources for the Served Model should scale down to + // Whether the compute resources for the served entity should scale down to // zero. ScaleToZeroEnabled bool `json:"scale_to_zero_enabled,omitempty"` - // Information corresponding to the state of the Served Model. + State *ServedModelState `json:"state,omitempty"` - // The workload size of the served model. The workload size corresponds to a - // range of provisioned concurrency that the compute will autoscale between. - // A single unit of provisioned concurrency can process one request at a - // time. Valid workload sizes are "Small" (4 - 4 provisioned concurrency), + // The workload size of the served entity. The workload size corresponds to + // a range of provisioned concurrency that the compute autoscales between. A + // single unit of provisioned concurrency can process one request at a time. + // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size will be 0. + // the provisioned concurrency for each workload size is 0. WorkloadSize string `json:"workload_size,omitempty"` - // The workload type of the served model. The workload type selects which + // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this // parameter is "CPU". For deep learning workloads, GPU acceleration is // available by selecting workload types like GPU_SMALL and others. See the // available [GPU types]. 
// - // [GPU types]: https://docs.databricks.com/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types - WorkloadType string `json:"workload_type,omitempty"` + // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types + WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-"` } @@ -1744,13 +1787,11 @@ func (s ServedModelOutput) MarshalJSON() ([]byte, error) { } type ServedModelSpec struct { - // The name of the model in Databricks Model Registry or the full name of - // the model in Unity Catalog. + // Only one of model_name and entity_name should be populated ModelName string `json:"model_name,omitempty"` - // The version of the model in Databricks Model Registry or Unity Catalog to - // be served. + // Only one of model_version and entity_version should be populated ModelVersion string `json:"model_version,omitempty"` - // The name of the served model. + Name string `json:"name,omitempty"` ForceSendFields []string `json:"-"` @@ -1765,20 +1806,8 @@ func (s ServedModelSpec) MarshalJSON() ([]byte, error) { } type ServedModelState struct { - // The state of the served entity deployment. DEPLOYMENT_CREATING indicates - // that the served entity is not ready yet because the deployment is still - // being created (i.e container image is building, model server is deploying - // for the first time, etc.). DEPLOYMENT_RECOVERING indicates that the - // served entity was previously in a ready state but no longer is and is - // attempting to recover. DEPLOYMENT_READY indicates that the served entity - // is ready to receive traffic. DEPLOYMENT_FAILED indicates that there was - // an error trying to bring up the served entity (e.g container image build - // failed, the model server failed to start due to a model loading error, - // etc.) 
DEPLOYMENT_ABORTED indicates that the deployment was terminated - // likely due to a failure in bringing up another served entity under the - // same endpoint and config version. Deployment ServedModelStateDeployment `json:"deployment,omitempty"` - // More information about the state of the served entity, if available. + DeploymentStateMessage string `json:"deployment_state_message,omitempty"` ForceSendFields []string `json:"-"` @@ -1792,17 +1821,6 @@ func (s ServedModelState) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The state of the served entity deployment. DEPLOYMENT_CREATING indicates that -// the served entity is not ready yet because the deployment is still being -// created (i.e container image is building, model server is deploying for the -// first time, etc.). DEPLOYMENT_RECOVERING indicates that the served entity was -// previously in a ready state but no longer is and is attempting to recover. -// DEPLOYMENT_READY indicates that the served entity is ready to receive -// traffic. DEPLOYMENT_FAILED indicates that there was an error trying to bring -// up the served entity (e.g container image build failed, the model server -// failed to start due to a model loading error, etc.) DEPLOYMENT_ABORTED -// indicates that the deployment was terminated likely due to a failure in -// bringing up another served entity under the same endpoint and config version. type ServedModelStateDeployment string const ServedModelStateDeploymentAborted ServedModelStateDeployment = `DEPLOYMENT_ABORTED` @@ -1844,7 +1862,8 @@ type ServerLogsResponse struct { type ServingEndpoint struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model endpoints are currently supported. + // external model and provisioned throughput endpoints are currently + // supported. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The config that is currently being served by the endpoint. 
Config *EndpointCoreConfigSummary `json:"config,omitempty"` @@ -1852,8 +1871,8 @@ type ServingEndpoint struct { CreationTimestamp int64 `json:"creation_timestamp,omitempty"` // The email of the user who created the serving endpoint. Creator string `json:"creator,omitempty"` - // System-generated ID of the endpoint. This is used to refer to the - // endpoint in the Permissions API + // System-generated ID of the endpoint, included to be used by the + // Permissions API. Id string `json:"id,omitempty"` // The timestamp when the endpoint was last updated by a user in Unix time. LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` @@ -1923,7 +1942,8 @@ func (s ServingEndpointAccessControlResponse) MarshalJSON() ([]byte, error) { type ServingEndpointDetailed struct { // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model endpoints are currently supported. + // external model and provisioned throughput endpoints are currently + // supported. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigOutput `json:"config,omitempty"` @@ -1967,7 +1987,6 @@ func (s ServingEndpointDetailed) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The permission level of the principal making the request. 
type ServingEndpointDetailedPermissionLevel string const ServingEndpointDetailedPermissionLevelCanManage ServingEndpointDetailedPermissionLevel = `CAN_MANAGE` @@ -2085,6 +2104,39 @@ type ServingEndpointPermissionsRequest struct { ServingEndpointId string `json:"-" url:"-"` } +type ServingModelWorkloadType string + +const ServingModelWorkloadTypeCpu ServingModelWorkloadType = `CPU` + +const ServingModelWorkloadTypeGpuLarge ServingModelWorkloadType = `GPU_LARGE` + +const ServingModelWorkloadTypeGpuMedium ServingModelWorkloadType = `GPU_MEDIUM` + +const ServingModelWorkloadTypeGpuSmall ServingModelWorkloadType = `GPU_SMALL` + +const ServingModelWorkloadTypeMultigpuMedium ServingModelWorkloadType = `MULTIGPU_MEDIUM` + +// String representation for [fmt.Print] +func (f *ServingModelWorkloadType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ServingModelWorkloadType) Set(v string) error { + switch v { + case `CPU`, `GPU_LARGE`, `GPU_MEDIUM`, `GPU_SMALL`, `MULTIGPU_MEDIUM`: + *f = ServingModelWorkloadType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CPU", "GPU_LARGE", "GPU_MEDIUM", "GPU_SMALL", "MULTIGPU_MEDIUM"`, v) + } +} + +// Type always returns ServingModelWorkloadType to satisfy [pflag.Value] interface +func (f *ServingModelWorkloadType) Type() string { + return "ServingModelWorkloadType" +} + type TrafficConfig struct { // The list of routes that define traffic to each served entity. Routes []Route `json:"routes,omitempty"` diff --git a/service/sharing/api.go b/service/sharing/api.go index 941f6ae3f..57d1f45e2 100755 --- a/service/sharing/api.go +++ b/service/sharing/api.go @@ -335,7 +335,7 @@ type RecipientsInterface interface { // Create a share recipient. // // Creates a new recipient with the delta sharing authentication type in the - // metastore. The caller must be a metastore admin or has the + // metastore. 
The caller must be a metastore admin or have the // **CREATE_RECIPIENT** privilege on the metastore. Create(ctx context.Context, request CreateRecipient) (*RecipientInfo, error) @@ -385,15 +385,6 @@ type RecipientsInterface interface { // This method is generated by Databricks SDK Code Generator. ListAll(ctx context.Context, request ListRecipientsRequest) ([]RecipientInfo, error) - // RecipientInfoNameToMetastoreIdMap calls [RecipientsAPI.ListAll] and creates a map of results with [RecipientInfo].Name as key and [RecipientInfo].MetastoreId as value. - // - // Returns an error if there's more than one [RecipientInfo] with the same .Name. - // - // Note: All [RecipientInfo] instances are loaded into memory before creating a map. - // - // This method is generated by Databricks SDK Code Generator. - RecipientInfoNameToMetastoreIdMap(ctx context.Context, request ListRecipientsRequest) (map[string]string, error) - // Rotate a token. // // Refreshes the specified recipient's delta sharing authentication token with @@ -418,7 +409,7 @@ type RecipientsInterface interface { // metastore admin or the owner of the recipient. If the recipient name will be // updated, the user must be both a metastore admin and the owner of the // recipient. - Update(ctx context.Context, request UpdateRecipient) error + Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error) } func NewRecipients(client *client.DatabricksClient) *RecipientsAPI { @@ -516,31 +507,6 @@ func (a *RecipientsAPI) ListAll(ctx context.Context, request ListRecipientsReque return listing.ToSlice[RecipientInfo](ctx, iterator) } -// RecipientInfoNameToMetastoreIdMap calls [RecipientsAPI.ListAll] and creates a map of results with [RecipientInfo].Name as key and [RecipientInfo].MetastoreId as value. -// -// Returns an error if there's more than one [RecipientInfo] with the same .Name. -// -// Note: All [RecipientInfo] instances are loaded into memory before creating a map. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *RecipientsAPI) RecipientInfoNameToMetastoreIdMap(ctx context.Context, request ListRecipientsRequest) (map[string]string, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") - mapping := map[string]string{} - result, err := a.ListAll(ctx, request) - if err != nil { - return nil, err - } - for _, v := range result { - key := v.Name - _, duplicate := mapping[key] - if duplicate { - return nil, fmt.Errorf("duplicate .Name: %s", key) - } - mapping[key] = v.MetastoreId - } - return mapping, nil -} - // Get recipient share permissions. // // Gets the share permissions for the specified Recipient. The caller must be a diff --git a/service/sharing/impl.go b/service/sharing/impl.go index c6e2bccb6..da8223ee2 100755 --- a/service/sharing/impl.go +++ b/service/sharing/impl.go @@ -171,15 +171,15 @@ func (a *recipientsImpl) SharePermissions(ctx context.Context, request SharePerm return &getRecipientSharePermissionsResponse, err } -func (a *recipientsImpl) Update(ctx context.Context, request UpdateRecipient) error { - var updateResponse UpdateResponse +func (a *recipientsImpl) Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error) { + var recipientInfo RecipientInfo path := fmt.Sprintf("/api/2.1/unity-catalog/recipients/%v", request.Name) queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) - return err + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &recipientInfo) + return &recipientInfo, err } // unexported type that holds implementations of just Shares API methods diff --git a/service/sharing/interface.go b/service/sharing/interface.go index 4f7e44ca9..ffbf87b16 100755 --- a/service/sharing/interface.go +++ 
b/service/sharing/interface.go @@ -107,7 +107,7 @@ type RecipientsService interface { // Create a share recipient. // // Creates a new recipient with the delta sharing authentication type in the - // metastore. The caller must be a metastore admin or has the + // metastore. The caller must be a metastore admin or have the // **CREATE_RECIPIENT** privilege on the metastore. Create(ctx context.Context, request CreateRecipient) (*RecipientInfo, error) @@ -154,7 +154,7 @@ type RecipientsService interface { // metastore admin or the owner of the recipient. If the recipient name will // be updated, the user must be both a metastore admin and the owner of the // recipient. - Update(ctx context.Context, request UpdateRecipient) error + Update(ctx context.Context, request UpdateRecipient) (*RecipientInfo, error) } // A share is a container instantiated with :method:shares/create. Once created diff --git a/service/sharing/model.go b/service/sharing/model.go index 4da345b00..e77d94b6c 100755 --- a/service/sharing/model.go +++ b/service/sharing/model.go @@ -44,8 +44,8 @@ type CreateProvider struct { Comment string `json:"comment,omitempty"` // The name of the Provider. Name string `json:"name"` - // This field is required when the __authentication_type__ is **TOKEN** or - // not provided. + // This field is required when the __authentication_type__ is **TOKEN**, + // **OAUTH_CLIENT_CREDENTIALS** or not provided. RecipientProfileStr string `json:"recipient_profile_str,omitempty"` ForceSendFields []string `json:"-"` @@ -65,7 +65,7 @@ type CreateRecipient struct { // Description about the recipient. Comment string `json:"comment,omitempty"` // The global Unity Catalog metastore id provided by the data recipient. - // This field is required when the __authentication_type__ is + // This field is only present when the __authentication_type__ is // **DATABRICKS**. The identifier is of format // __cloud__:__region__:__metastore-uuid__. 
DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` @@ -77,10 +77,13 @@ type CreateRecipient struct { Name string `json:"name"` // Username of the recipient owner. Owner string `json:"owner,omitempty"` - // Recipient properties as map of string key-value pairs. + // Recipient properties as map of string key-value pairs. When provided in + // update request, the specified properties will override the existing + // properties. To add and remove properties, one would need to perform a + // read-modify-write. PropertiesKvpairs *SecurablePropertiesKvPairs `json:"properties_kvpairs,omitempty"` // The one-time sharing code provided by the data recipient. This field is - // required when the __authentication_type__ is **DATABRICKS**. + // only present when the __authentication_type__ is **DATABRICKS**. SharingCode string `json:"sharing_code,omitempty"` ForceSendFields []string `json:"-"` @@ -565,7 +568,7 @@ type ProviderInfo struct { CreatedBy string `json:"created_by,omitempty"` // The global UC metastore id of the data provider. This field is only // present when the __authentication_type__ is **DATABRICKS**. The - // identifier is of format ::. + // identifier is of format __cloud__:__region__:__metastore-uuid__. DataProviderGlobalMetastoreId string `json:"data_provider_global_metastore_id,omitempty"` // UUID of the provider's UC metastore. This field is only present when the // __authentication_type__ is **DATABRICKS**. @@ -575,17 +578,17 @@ type ProviderInfo struct { // Username of Provider owner. Owner string `json:"owner,omitempty"` // The recipient profile. This field is only present when the - // authentication_type is `TOKEN`. + // authentication_type is `TOKEN` or `OAUTH_CLIENT_CREDENTIALS`. RecipientProfile *RecipientProfile `json:"recipient_profile,omitempty"` - // This field is only present when the authentication_type is `TOKEN` or not - // provided. 
+ // This field is required when the __authentication_type__ is **TOKEN**, + // **OAUTH_CLIENT_CREDENTIALS** or not provided. RecipientProfileStr string `json:"recipient_profile_str,omitempty"` // Cloud region of the provider's UC metastore. This field is only present // when the __authentication_type__ is **DATABRICKS**. Region string `json:"region,omitempty"` // Time at which this Provider was created, in epoch milliseconds. UpdatedAt int64 `json:"updated_at,omitempty"` - // Username of user who last modified Share. + // Username of user who last modified Provider. UpdatedBy string `json:"updated_by,omitempty"` ForceSendFields []string `json:"-"` @@ -623,8 +626,8 @@ type RecipientInfo struct { ActivationUrl string `json:"activation_url,omitempty"` // The delta sharing authentication type. AuthenticationType AuthenticationType `json:"authentication_type,omitempty"` - // Cloud vendor of the recipient's Unity Catalog Metstore. This field is - // only present when the __authentication_type__ is **DATABRICKS**`. + // Cloud vendor of the recipient's Unity Catalog Metastore. This field is + // only present when the __authentication_type__ is **DATABRICKS**. Cloud string `json:"cloud,omitempty"` // Description about the recipient. Comment string `json:"comment,omitempty"` @@ -637,18 +640,23 @@ type RecipientInfo struct { // **DATABRICKS**. The identifier is of format // __cloud__:__region__:__metastore-uuid__. DataRecipientGlobalMetastoreId string `json:"data_recipient_global_metastore_id,omitempty"` + // Expiration timestamp of the token, in epoch milliseconds. + ExpirationTime int64 `json:"expiration_time,omitempty"` // IP Access List IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` - // Unique identifier of recipient's Unity Catalog metastore. This field is - // only present when the __authentication_type__ is **DATABRICKS** + // Unique identifier of recipient's Unity Catalog Metastore. 
This field is + // only present when the __authentication_type__ is **DATABRICKS**. MetastoreId string `json:"metastore_id,omitempty"` // Name of Recipient. Name string `json:"name,omitempty"` // Username of the recipient owner. Owner string `json:"owner,omitempty"` - // Recipient properties as map of string key-value pairs. + // Recipient properties as map of string key-value pairs. When provided in + // update request, the specified properties will override the existing + // properties. To add and remove properties, one would need to perform a + // read-modify-write. PropertiesKvpairs *SecurablePropertiesKvPairs `json:"properties_kvpairs,omitempty"` - // Cloud region of the recipient's Unity Catalog Metstore. This field is + // Cloud region of the recipient's Unity Catalog Metastore. This field is // only present when the __authentication_type__ is **DATABRICKS**. Region string `json:"region,omitempty"` // The one-time sharing code provided by the data recipient. This field is @@ -695,7 +703,7 @@ type RecipientTokenInfo struct { // Full activation URL to retrieve the access token. It will be empty if the // token is already retrieved. ActivationUrl string `json:"activation_url,omitempty"` - // Time at which this recipient Token was created, in epoch milliseconds. + // Time at which this recipient token was created, in epoch milliseconds. CreatedAt int64 `json:"created_at,omitempty"` // Username of recipient token creator. CreatedBy string `json:"created_by,omitempty"` @@ -703,9 +711,9 @@ type RecipientTokenInfo struct { ExpirationTime int64 `json:"expiration_time,omitempty"` // Unique ID of the recipient token. Id string `json:"id,omitempty"` - // Time at which this recipient Token was updated, in epoch milliseconds. + // Time at which this recipient token was updated, in epoch milliseconds. UpdatedAt int64 `json:"updated_at,omitempty"` - // Username of recipient Token updater. + // Username of recipient token updater. 
UpdatedBy string `json:"updated_by,omitempty"` ForceSendFields []string `json:"-"` @@ -752,7 +760,7 @@ type RotateRecipientToken struct { // cannot extend the expiration_time. Use 0 to expire the existing token // immediately, negative number will return an error. ExistingTokenExpireInSeconds int64 `json:"existing_token_expire_in_seconds"` - // The name of the recipient. + // The name of the Recipient. Name string `json:"-" url:"-"` } @@ -763,9 +771,6 @@ type SecurablePropertiesKvPairs struct { Properties map[string]string `json:"properties"` } -// A map of key-value properties attached to the securable. -type SecurablePropertiesMap map[string]string - type ShareInfo struct { // User-provided free-form text description. Comment string `json:"comment,omitempty"` @@ -1052,8 +1057,8 @@ type UpdateProvider struct { NewName string `json:"new_name,omitempty"` // Username of Provider owner. Owner string `json:"owner,omitempty"` - // This field is required when the __authentication_type__ is **TOKEN** or - // not provided. + // This field is required when the __authentication_type__ is **TOKEN**, + // **OAUTH_CLIENT_CREDENTIALS** or not provided. RecipientProfileStr string `json:"recipient_profile_str,omitempty"` ForceSendFields []string `json:"-"` @@ -1076,7 +1081,7 @@ type UpdateRecipient struct { IpAccessList *IpAccessList `json:"ip_access_list,omitempty"` // Name of the recipient. Name string `json:"-" url:"-"` - // New name for the recipient. + // New name for the recipient. . NewName string `json:"new_name,omitempty"` // Username of the recipient owner. Owner string `json:"owner,omitempty"` @@ -1097,9 +1102,6 @@ func (s UpdateRecipient) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type UpdateResponse struct { -} - type UpdateShare struct { // User-provided free-form text description. 
Comment string `json:"comment,omitempty"` diff --git a/service/sharing/recipients_usage_test.go b/service/sharing/recipients_usage_test.go index ee78c5abe..0d312454c 100755 --- a/service/sharing/recipients_usage_test.go +++ b/service/sharing/recipients_usage_test.go @@ -159,7 +159,7 @@ func ExampleRecipientsAPI_Update_recipients() { } logger.Infof(ctx, "found %v", created) - err = w.Recipients.Update(ctx, sharing.UpdateRecipient{ + _, err = w.Recipients.Update(ctx, sharing.UpdateRecipient{ Name: created.Name, Comment: fmt.Sprintf("sdk-%x", time.Now().UnixNano()), }) diff --git a/version/version.go b/version/version.go index 142d27dac..00cf8e860 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.55.0" +const Version = "0.56.0" diff --git a/workspace_client.go b/workspace_client.go index f0bc03167..b129ce30d 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -32,6 +32,9 @@ type WorkspaceClient struct { Config *config.Config apiClient *httpclient.ApiClient + // Rule based Access Control for Databricks Resources. + AccessControl iam.AccessControlInterface + // These APIs manage access rules on resources in an account. Currently, // only grant rules are supported. A grant rule specifies a role assigned to // a set of principals. A list of rules attached to a resource is called a @@ -293,10 +296,15 @@ type WorkspaceClient struct { // // The Files API has two distinct endpoints, one for working with files // (`/fs/files`) and another one for working with directories - // (`/fs/directories`). Both endpoints, use the standard HTTP methods GET, + // (`/fs/directories`). Both endpoints use the standard HTTP methods GET, // HEAD, PUT, and DELETE to manage files and directories specified using // their URI path. The path is always absolute. // + // Some Files API client features are currently experimental. 
To enable + // them, set `enable_experimental_files_api_client = True` in your + // configuration profile or use the environment variable + // `DATABRICKS_ENABLE_EXPERIMENTAL_FILES_API_CLIENT=True`. + // // [Unity Catalog volumes]: https://docs.databricks.com/en/connect/unity-catalog/volumes.html Files files.FilesInterface @@ -1150,6 +1158,7 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { Config: cfg, apiClient: apiClient, + AccessControl: iam.NewAccessControl(databricksClient), AccountAccessControlProxy: iam.NewAccountAccessControlProxy(databricksClient), Alerts: sql.NewAlerts(databricksClient), AlertsLegacy: sql.NewAlertsLegacy(databricksClient), From 18cebf1d5ca8889ae82f660c96fecc8bc5b73be5 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 23 Jan 2025 13:09:44 +0100 Subject: [PATCH 07/54] [Fix] Do not send query parameters when set to zero value (#1136) ## What changes are proposed in this pull request? #1124 fixed the handling of APIs that use query parameters and a body simultaneously. After that change, query parameters are always sent, even if set to the zero value. This PR addresses this, using the same logic for query parameters as for fields in the body: if a field is set to its zero value, it will be included in the query parameter if it is present in ForceSendFields. ## How is this tested? This behavior is dependend upon in the Terraform provider. I'll be using it in https://github.com/databricks/terraform-provider-databricks/pull/4430 and verifying that there is no behavior change in the generated request. When using this PR in Terraform, tests asserting that the path doesn't include query parameters pass. 
--- service/apps/impl.go | 5 ++++- service/files/impl.go | 5 ++++- service/oauth2/impl.go | 17 +++++++++++++---- service/pkg.go | 4 ++-- service/sharing/impl.go | 9 +++++++-- 5 files changed, 30 insertions(+), 10 deletions(-) diff --git a/service/apps/impl.go b/service/apps/impl.go index 21121c39d..b83aec396 100755 --- a/service/apps/impl.go +++ b/service/apps/impl.go @@ -8,6 +8,7 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "golang.org/x/exp/slices" ) // unexported type that holds implementations of just Apps API methods @@ -19,7 +20,9 @@ func (a *appsImpl) Create(ctx context.Context, request CreateAppRequest) (*App, var app App path := "/api/2.0/apps" queryParams := make(map[string]any) - queryParams["no_compute"] = request.NoCompute + if request.NoCompute != false || slices.Contains(request.ForceSendFields, "NoCompute") { + queryParams["no_compute"] = request.NoCompute + } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" diff --git a/service/files/impl.go b/service/files/impl.go index 6e83c42cd..e3f6930bc 100755 --- a/service/files/impl.go +++ b/service/files/impl.go @@ -9,6 +9,7 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/httpclient" + "golang.org/x/exp/slices" ) // unexported type that holds implementations of just Dbfs API methods @@ -197,7 +198,9 @@ func (a *filesImpl) Upload(ctx context.Context, request UploadRequest) error { var uploadResponse UploadResponse path := fmt.Sprintf("/api/2.0/fs/files%v", httpclient.EncodeMultiSegmentPathParameter(request.FilePath)) queryParams := make(map[string]any) - queryParams["overwrite"] = request.Overwrite + if request.Overwrite != false || slices.Contains(request.ForceSendFields, "Overwrite") { + queryParams["overwrite"] = request.Overwrite + } headers := make(map[string]string) headers["Content-Type"] = "application/octet-stream" err := a.client.Do(ctx, 
http.MethodPut, path, headers, queryParams, request.Contents, &uploadResponse) diff --git a/service/oauth2/impl.go b/service/oauth2/impl.go index 484596f10..d8db890dc 100755 --- a/service/oauth2/impl.go +++ b/service/oauth2/impl.go @@ -8,6 +8,7 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "golang.org/x/exp/slices" ) // unexported type that holds implementations of just AccountFederationPolicy API methods @@ -19,7 +20,9 @@ func (a *accountFederationPolicyImpl) Create(ctx context.Context, request Create var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) - queryParams["policy_id"] = request.PolicyId + if request.PolicyId != "" || slices.Contains(request.ForceSendFields, "PolicyId") { + queryParams["policy_id"] = request.PolicyId + } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" @@ -61,7 +64,9 @@ func (a *accountFederationPolicyImpl) Update(ctx context.Context, request Update var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) - queryParams["update_mask"] = request.UpdateMask + if request.UpdateMask != "" || slices.Contains(request.ForceSendFields, "UpdateMask") { + queryParams["update_mask"] = request.UpdateMask + } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" @@ -207,7 +212,9 @@ func (a *servicePrincipalFederationPolicyImpl) Create(ctx context.Context, reque var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) queryParams := make(map[string]any) - queryParams["policy_id"] = request.PolicyId + if request.PolicyId != 
"" || slices.Contains(request.ForceSendFields, "PolicyId") { + queryParams["policy_id"] = request.PolicyId + } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" @@ -249,7 +256,9 @@ func (a *servicePrincipalFederationPolicyImpl) Update(ctx context.Context, reque var federationPolicy FederationPolicy path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies/%v", a.client.ConfiguredAccountID(), request.ServicePrincipalId, request.PolicyId) queryParams := make(map[string]any) - queryParams["update_mask"] = request.UpdateMask + if request.UpdateMask != "" || slices.Contains(request.ForceSendFields, "UpdateMask") { + queryParams["update_mask"] = request.UpdateMask + } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" diff --git a/service/pkg.go b/service/pkg.go index a1e015563..f26f56eda 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -50,10 +50,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. -// // - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. // +// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. // // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. 
diff --git a/service/sharing/impl.go b/service/sharing/impl.go index da8223ee2..e6b6d33ec 100755 --- a/service/sharing/impl.go +++ b/service/sharing/impl.go @@ -8,6 +8,7 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "golang.org/x/exp/slices" "github.com/databricks/databricks-sdk-go/service/catalog" ) @@ -253,8 +254,12 @@ func (a *sharesImpl) UpdatePermissions(ctx context.Context, request UpdateShareP var updatePermissionsResponse UpdatePermissionsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v/permissions", request.Name) queryParams := make(map[string]any) - queryParams["max_results"] = request.MaxResults - queryParams["page_token"] = request.PageToken + if request.MaxResults != 0 || slices.Contains(request.ForceSendFields, "MaxResults") { + queryParams["max_results"] = request.MaxResults + } + if request.PageToken != "" || slices.Contains(request.ForceSendFields, "PageToken") { + queryParams["page_token"] = request.PageToken + } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" From bf617bb7a6f46370b94886dd674e4721b17224fd Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Thu, 23 Jan 2025 14:00:29 +0100 Subject: [PATCH 08/54] [Release] Release v0.56.1 (#1137) ### Bug Fixes * Do not send query parameters when set to zero value ([#1136](https://github.com/databricks/databricks-sdk-go/pull/1136)). --- CHANGELOG.md | 8 ++++++++ version/version.go | 2 +- 2 files changed, 9 insertions(+), 1 deletion(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 61a339db8..4a156deb1 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,13 @@ # Version changelog +## [Release] Release v0.56.1 + +### Bug Fixes + + * Do not send query parameters when set to zero value ([#1136](https://github.com/databricks/databricks-sdk-go/pull/1136)). 
+ + + ## [Release] Release v0.56.0 ### Bug Fixes diff --git a/version/version.go b/version/version.go index 00cf8e860..61aa3beb4 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.56.0" +const Version = "0.56.1" From 31fdc692bb1959ea4be6705526050267e7e6ebf9 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Mon, 3 Feb 2025 16:02:55 +0100 Subject: [PATCH 09/54] [Feature] Add support for async OAuth token refreshes (#1135) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What changes are proposed in this pull request? This PR aims at eliminating long-tail latency due to OAuth token refreshes in scenarios where a single client is responsible for a relatively high (e.g. > 1 QPS) continuous outbound traffic. The feature is disabled by default — which arguably makes this PR a functional no-op. Precisely, the PR introduces a new token cache which attempts to always keep its token fresh by asynchronously refreshing the token before it expires. We differentiate three token states: - `fresh`: The token is valid and is not close to its expiration. - `stale`: The token is valid but will expire soon. - `expired`: The token has expired and cannot be used. Each time a request tries to access the token, we do the following: - If the token is `fresh`, return the current token; - If the token is `stale`, trigger an asynchronous refresh and return the current token; - If the token is `expired`, make a blocking refresh call to update the token and return it. In particular, asynchronous refreshes use a lock to guarantee that there can only be one pending refresh at a given time. The performance of the algorithm depends on the length of the `stale` and `fresh` periods. On the first hand, the `stale` period must be long enough to prevent tokens from entering the expired state. 
On the other hand, a long `stale` period reduces the length of the `fresh` period, thus increasing the refresh frequency. Right now, the `stale` period is configured to 3 minutes by default (i.e. 5% of the expected token lifespan of 1 hour). This value might be changed in the future to guarantee that the default behavior achieves the best performance for the majority of users. **For reviewers:** - This PR only uses the new cache in control-plane auth flows; I plan to send a follow-up PR to enable asynchronous refresh in data-plane flows once this one has been merged. - Interface `oauth2.TokenSource` is likely not sufficient for us and we would need one with a `Token` method that takes a `context.Context` as parameter: `Token(context.Context) (Token, error)`. ## How is this tested? Complete test coverage with a focus on various concurrency scenarios. --- config/experimental/auth/auth.go | 212 +++++++++++++++++++ config/experimental/auth/auth_test.go | 280 ++++++++++++++++++++++++++ config/oauth_visitors.go | 48 +++-- 3 files changed, 528 insertions(+), 12 deletions(-) create mode 100644 config/experimental/auth/auth.go create mode 100644 config/experimental/auth/auth_test.go diff --git a/config/experimental/auth/auth.go b/config/experimental/auth/auth.go new file mode 100644 index 000000000..2f560498b --- /dev/null +++ b/config/experimental/auth/auth.go @@ -0,0 +1,212 @@ +// Package auth is an internal package that provides authentication utilities. +// +// IMPORTANT: This package is not meant to be used directly by consumers of the +// SDK and is subject to change without notice. +package auth + +import ( + "sync" + "time" + + "golang.org/x/oauth2" +) + +const ( + // Default duration for the stale period. The number has been set arbitrarily + // and might be changed in the future. + defaultStaleDuration = 3 * time.Minute + + // Disable the asynchronous token refresh by default. This is meant to + // change in the future once the feature is stable. 
+ defaultDisableAsyncRefresh = true +) + +type Option func(*cachedTokenSource) + +// WithCachedToken sets the initial token to be used by a cached token source. +func WithCachedToken(t *oauth2.Token) Option { + return func(cts *cachedTokenSource) { + cts.cachedToken = t + } +} + +// WithAsyncRefresh enables or disables the asynchronous token refresh. +func WithAsyncRefresh(b bool) Option { + return func(cts *cachedTokenSource) { + cts.disableAsync = !b + } +} + +// NewCachedTokenSource wraps a [oauth2.TokenSource] to cache the tokens +// it returns. By default, the cache will refresh tokens asynchronously a few +// minutes before they expire. +// +// The token cache is safe for concurrent use by multiple goroutines and will +// guarantee that only one token refresh is triggered at a time. +// +// The token cache does not take care of retries in case the token source +// returns an error; it is the responsibility of the provided token source to +// handle retries appropriately. +// +// If the TokenSource is already a cached token source (obtained by calling this +// function), it is returned as is. +func NewCachedTokenSource(ts oauth2.TokenSource, opts ...Option) oauth2.TokenSource { + // This is meant as a niche optimization to avoid double caching of the + // token source in situations where the caller needs caching guarantees + // but does not know if the token source is already cached. + if cts, ok := ts.(*cachedTokenSource); ok { + return cts + } + + cts := &cachedTokenSource{ + tokenSource: ts, + staleDuration: defaultStaleDuration, + disableAsync: defaultDisableAsyncRefresh, + cachedToken: nil, + timeNow: time.Now, + } + + for _, opt := range opts { + opt(cts) + } + + return cts +} + +type cachedTokenSource struct { + // The token source to obtain tokens from. + tokenSource oauth2.TokenSource + + // If true, only refresh the token with a blocking call when it is expired. 
+ disableAsync bool + + // Duration during which a token is considered stale, see tokenState. + staleDuration time.Duration + + mu sync.Mutex + cachedToken *oauth2.Token + + // Indicates that an async refresh is in progress. This is used to prevent + // multiple async refreshes from being triggered at the same time. + isRefreshing bool + + // Error returned by the last refresh. Async refreshes are disabled if this + // value is not nil so that the cache does not continue sending request to + // a potentially failing server. The next blocking call will re-enable async + // refreshes by setting this value to nil if it succeeds, or return the + // error if it fails. + refreshErr error + + timeNow func() time.Time // for testing +} + +// Token returns a token from the cache or fetches a new one if the current +// token is expired. +func (cts *cachedTokenSource) Token() (*oauth2.Token, error) { + if cts.disableAsync { + return cts.blockingToken() + } + return cts.asyncToken() +} + +// tokenState represents the state of the token. Each token can be in one of +// the following three states: +// - fresh: The token is valid. +// - stale: The token is valid but will expire soon. +// - expired: The token has expired and cannot be used. +// +// Token state through time: +// +// issue time expiry time +// v v +// | fresh | stale | expired -> time +// | valid | +type tokenState int + +const ( + fresh tokenState = iota // The token is valid. + stale // The token is valid but will expire soon. + expired // The token has expired and cannot be used. +) + +// tokenState returns the state of the token. The function is not thread-safe +// and should be called with the lock held. 
+func (c *cachedTokenSource) tokenState() tokenState { + if c.cachedToken == nil { + return expired + } + switch lifeSpan := c.cachedToken.Expiry.Sub(c.timeNow()); { + case lifeSpan <= 0: + return expired + case lifeSpan <= c.staleDuration: + return stale + default: + return fresh + } +} + +func (cts *cachedTokenSource) asyncToken() (*oauth2.Token, error) { + cts.mu.Lock() + ts := cts.tokenState() + t := cts.cachedToken + cts.mu.Unlock() + + switch ts { + case fresh: + return t, nil + case stale: + cts.triggerAsyncRefresh() + return t, nil + default: // expired + return cts.blockingToken() + } +} + +func (cts *cachedTokenSource) blockingToken() (*oauth2.Token, error) { + cts.mu.Lock() + + // The lock is kept for the entire operation to ensure that only one + // blockingToken operation is running at a time. + defer cts.mu.Unlock() + + // This is important to recover from potential previous failed attempts + // to refresh the token asynchronously, see declaration of refreshErr for + // more information. + cts.isRefreshing = false + cts.refreshErr = nil + + // It's possible that the token got refreshed (either by a blockingToken or + // an asyncRefresh call) while this particular call was waiting to acquire + // the mutex. This check avoids refreshing the token again in such cases. 
+ if ts := cts.tokenState(); ts != expired { // fresh or stale + return cts.cachedToken, nil + } + + t, err := cts.tokenSource.Token() + if err != nil { + return nil, err + } + cts.cachedToken = t + return t, nil +} + +func (cts *cachedTokenSource) triggerAsyncRefresh() { + cts.mu.Lock() + defer cts.mu.Unlock() + if !cts.isRefreshing && cts.refreshErr == nil { + cts.isRefreshing = true + + go func() { + t, err := cts.tokenSource.Token() + + cts.mu.Lock() + defer cts.mu.Unlock() + cts.isRefreshing = false + if err != nil { + cts.refreshErr = err + return + } + cts.cachedToken = t + }() + } +} diff --git a/config/experimental/auth/auth_test.go b/config/experimental/auth/auth_test.go new file mode 100644 index 000000000..035ebe42d --- /dev/null +++ b/config/experimental/auth/auth_test.go @@ -0,0 +1,280 @@ +package auth + +import ( + "fmt" + "reflect" + "sync" + "sync/atomic" + "testing" + "time" + + "golang.org/x/oauth2" +) + +type mockTokenSource func() (*oauth2.Token, error) + +func (m mockTokenSource) Token() (*oauth2.Token, error) { + return m() +} + +func TestNewCachedTokenSource_noCaching(t *testing.T) { + want := &cachedTokenSource{} + got := NewCachedTokenSource(want, nil) + if got != want { + t.Errorf("NewCachedTokenSource() = %v, want %v", got, want) + } +} + +func TestNewCachedTokenSource_default(t *testing.T) { + ts := mockTokenSource(func() (*oauth2.Token, error) { + return nil, nil + }) + + got, ok := NewCachedTokenSource(ts).(*cachedTokenSource) + if !ok { + t.Fatalf("NewCachedTokenSource() = %T, want *cachedTokenSource", got) + } + + if got.staleDuration != defaultStaleDuration { + t.Errorf("NewCachedTokenSource() staleDuration = %v, want %v", got.staleDuration, defaultStaleDuration) + } + if got.disableAsync != defaultDisableAsyncRefresh { + t.Errorf("NewCachedTokenSource() disableAsync = %v, want %v", got.disableAsync, defaultDisableAsyncRefresh) + } + if got.cachedToken != nil { + t.Errorf("NewCachedTokenSource() cachedToken = %v, want nil", 
got.cachedToken) + } +} + +func TestNewCachedTokenSource_options(t *testing.T) { + ts := mockTokenSource(func() (*oauth2.Token, error) { + return nil, nil + }) + + wantDisableAsync := false + wantCachedToken := &oauth2.Token{Expiry: time.Unix(42, 0)} + + opts := []Option{ + WithAsyncRefresh(!wantDisableAsync), + WithCachedToken(wantCachedToken), + } + + got, ok := NewCachedTokenSource(ts, opts...).(*cachedTokenSource) + if !ok { + t.Fatalf("NewCachedTokenSource() = %T, want *cachedTokenSource", got) + } + + if got.disableAsync != wantDisableAsync { + t.Errorf("NewCachedTokenSource(): disableAsync = %v, want %v", got.disableAsync, wantDisableAsync) + } + if got.cachedToken != wantCachedToken { + t.Errorf("NewCachedTokenSource(): cachedToken = %v, want %v", got.cachedToken, wantCachedToken) + } +} + +func TestCachedTokenSource_tokenState(t *testing.T) { + now := time.Unix(1337, 0) // mock value for time.Now() + + testCases := []struct { + token *oauth2.Token + staleDuration time.Duration + want tokenState + }{ + { + token: nil, + staleDuration: 10 * time.Minute, + want: expired, + }, + { + token: &oauth2.Token{ + Expiry: now.Add(-1 * time.Second), + }, + staleDuration: 10 * time.Minute, + want: expired, + }, + { + token: &oauth2.Token{ + Expiry: now.Add(1 * time.Hour), + }, + staleDuration: 10 * time.Minute, + want: fresh, + }, + { + token: &oauth2.Token{ + Expiry: now.Add(5 * time.Minute), + }, + staleDuration: 10 * time.Minute, + want: stale, + }, + } + + for _, tc := range testCases { + cts := &cachedTokenSource{ + cachedToken: tc.token, + staleDuration: tc.staleDuration, + disableAsync: false, + timeNow: func() time.Time { return now }, + } + + got := cts.tokenState() + + if got != tc.want { + t.Errorf("tokenState() = %v, want %v", got, tc.want) + } + } +} + +func TestCachedTokenSource_Token(t *testing.T) { + now := time.Unix(1337, 0) // mock value for time.Now() + nTokenCalls := 10 // number of goroutines calling Token() + testCases := []struct { + desc string 
// description of the test case + cachedToken *oauth2.Token // token cached before calling Token() + disableAsync bool // whether are disabled or not + refreshErr error // whether the cache was in error state + + returnedToken *oauth2.Token // token returned by the token source + returnedError error // error returned by the token source + + wantCalls int // expected number of calls to the token source + wantToken *oauth2.Token // expected token in the cache + }{ + { + desc: "[Blocking] no cached token", + disableAsync: true, + returnedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 1, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + }, + { + desc: "[Blocking] expired cached token", + disableAsync: true, + cachedToken: &oauth2.Token{Expiry: now.Add(-1 * time.Second)}, + returnedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 1, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + }, + { + desc: "[Blocking] fresh cached token", + disableAsync: true, + cachedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 0, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + }, + { + desc: "[Blocking] stale cached token", + disableAsync: true, + cachedToken: &oauth2.Token{Expiry: now.Add(1 * time.Minute)}, + wantCalls: 0, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Minute)}, + }, + { + desc: "[Blocking] refresh error", + disableAsync: true, + returnedError: fmt.Errorf("test error"), + wantCalls: 10, + }, + { + desc: "[Blocking] recover from error", + disableAsync: true, + refreshErr: fmt.Errorf("refresh error"), + cachedToken: &oauth2.Token{Expiry: now.Add(-1 * time.Minute)}, + returnedToken: &oauth2.Token{Expiry: now.Add(-1 * time.Hour)}, + wantCalls: 10, + wantToken: &oauth2.Token{Expiry: now.Add(-1 * time.Hour)}, + }, + { + desc: "[Async] no cached token", + returnedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 1, + wantToken: &oauth2.Token{Expiry: now.Add(1 * 
time.Hour)}, + }, + { + desc: "[Async] no cached token", + returnedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 1, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + }, + { + desc: "[Async] expired cached token", + cachedToken: &oauth2.Token{Expiry: now.Add(-1 * time.Second)}, + returnedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 1, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + }, + { + desc: "[Async] fresh cached token", + cachedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 0, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + }, + { + desc: "[Async] stale cached token", + cachedToken: &oauth2.Token{Expiry: now.Add(1 * time.Minute)}, + returnedToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + wantCalls: 1, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Hour)}, + }, + { + desc: "[Async] refresh error", + cachedToken: &oauth2.Token{Expiry: now.Add(1 * time.Minute)}, + returnedError: fmt.Errorf("test error"), + wantCalls: 1, + wantToken: &oauth2.Token{Expiry: now.Add(1 * time.Minute)}, + }, + { + desc: "[Async] stale cached token, expired token returned", + cachedToken: &oauth2.Token{Expiry: now.Add(1 * time.Minute)}, + returnedToken: &oauth2.Token{Expiry: now.Add(-1 * time.Second)}, + wantCalls: 10, + wantToken: &oauth2.Token{Expiry: now.Add(-1 * time.Second)}, + }, + { + desc: "[Async] recover from error", + refreshErr: fmt.Errorf("refresh error"), + cachedToken: &oauth2.Token{Expiry: now.Add(-1 * time.Minute)}, + returnedToken: &oauth2.Token{Expiry: now.Add(-1 * time.Hour)}, + wantCalls: 10, + wantToken: &oauth2.Token{Expiry: now.Add(-1 * time.Hour)}, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + gotCalls := int32(0) + cts := &cachedTokenSource{ + disableAsync: tc.disableAsync, + staleDuration: 10 * time.Minute, + cachedToken: tc.cachedToken, + timeNow: func() time.Time { return now }, + tokenSource: 
mockTokenSource(func() (*oauth2.Token, error) { + atomic.AddInt32(&gotCalls, 1) + return tc.returnedToken, tc.returnedError + }), + } + + wg := sync.WaitGroup{} + for i := 0; i < nTokenCalls; i++ { + wg.Add(1) + go func() { + defer wg.Done() + cts.Token() + }() + } + + wg.Wait() + + // Wait for async refreshes to finish. This part is a little brittle + // but necessary to ensure that the async refresh is done before + // checking the results. + time.Sleep(10 * time.Millisecond) + + if int(gotCalls) != tc.wantCalls { + t.Errorf("want %d calls to cts.tokenSource.Token(), got %d", tc.wantCalls, gotCalls) + } + if !reflect.DeepEqual(tc.wantToken, cts.cachedToken) { + t.Errorf("want cached token %v, got %v", tc.wantToken, cts.cachedToken) + } + }) + } +} diff --git a/config/oauth_visitors.go b/config/oauth_visitors.go index 2b172bf1e..e9d3277c2 100644 --- a/config/oauth_visitors.go +++ b/config/oauth_visitors.go @@ -5,14 +5,16 @@ import ( "net/http" "time" + "github.com/databricks/databricks-sdk-go/config/experimental/auth" "golang.org/x/oauth2" ) -// serviceToServiceVisitor returns a visitor that sets the Authorization header to the token from the auth token source -// and the provided secondary header to the token from the secondary token source. -func serviceToServiceVisitor(auth, secondary oauth2.TokenSource, secondaryHeader string) func(r *http.Request) error { - refreshableAuth := oauth2.ReuseTokenSource(nil, auth) - refreshableSecondary := oauth2.ReuseTokenSource(nil, secondary) +// serviceToServiceVisitor returns a visitor that sets the Authorization header +// to the token from the auth token source and the provided secondary header to +// the token from the secondary token source. 
+func serviceToServiceVisitor(primary, secondary oauth2.TokenSource, secondaryHeader string) func(r *http.Request) error { + refreshableAuth := auth.NewCachedTokenSource(primary) + refreshableSecondary := auth.NewCachedTokenSource(secondary) return func(r *http.Request) error { inner, err := refreshableAuth.Token() if err != nil { @@ -31,9 +33,9 @@ func serviceToServiceVisitor(auth, secondary oauth2.TokenSource, secondaryHeader // The same as serviceToServiceVisitor, but without a secondary token source. func refreshableVisitor(inner oauth2.TokenSource) func(r *http.Request) error { - refreshableAuth := oauth2.ReuseTokenSource(nil, inner) + cts := auth.NewCachedTokenSource(inner) return func(r *http.Request) error { - inner, err := refreshableAuth.Token() + inner, err := cts.Token() if err != nil { return fmt.Errorf("inner token: %w", err) } @@ -51,10 +53,32 @@ func azureVisitor(cfg *Config, inner func(*http.Request) error) func(*http.Reque } } -// azureReuseTokenSource calls into oauth2.ReuseTokenSourceWithExpiry with a 40 second expiry window. -// By default, the oauth2 library refreshes a token 10 seconds before it expires. -// Azure Databricks rejects tokens that expire in 30 seconds or less. -// We combine these and refresh the token 40 seconds before it expires. +// azureReuseTokenSource returns a cached token source that refreshes token 40 +// seconds before they expire. The reason for this is that Azure Databricks +// rejects tokens that expire in 30 seconds or less and we want to give a 10 +// second buffer. 
func azureReuseTokenSource(t *oauth2.Token, ts oauth2.TokenSource) oauth2.TokenSource { - return oauth2.ReuseTokenSourceWithExpiry(t, ts, 40*time.Second) + early := wrap(ts, func(t *oauth2.Token) *oauth2.Token { + t.Expiry = t.Expiry.Add(-40 * time.Second) + return t + }) + + return auth.NewCachedTokenSource(early, auth.WithCachedToken(t)) +} + +func wrap(ts oauth2.TokenSource, fn func(*oauth2.Token) *oauth2.Token) oauth2.TokenSource { + return &tokenSourceWrapper{fn: fn, inner: ts} +} + +type tokenSourceWrapper struct { + fn func(*oauth2.Token) *oauth2.Token + inner oauth2.TokenSource +} + +func (w *tokenSourceWrapper) Token() (*oauth2.Token, error) { + t, err := w.inner.Token() + if err != nil { + return nil, err + } + return w.fn(t), nil } From 7cb1883c857f55b1a3ceef90100d3de680eab4e6 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 3 Feb 2025 17:58:42 +0100 Subject: [PATCH 10/54] [Release] Release v0.57.0 (#1140) ### New Features and Improvements * Add support for async OAuth token refreshes ([#1135](https://github.com/databricks/databricks-sdk-go/pull/1135)). ### API Changes: * Added [a.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicyAPI) account-level service. * Added [a.EnableIpAccessLists](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableIpAccessListsAPI) account-level service. * Added [w.LakeviewEmbedded](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewEmbeddedAPI) workspace-level service and [w.QueryExecution](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryExecutionAPI) workspace-level service. * Added [w.RedashConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#RedashConfigAPI) workspace-level service. * Added `GcpOauthToken` field for [catalog.TemporaryCredentials](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#TemporaryCredentials). 
* Added `Options` field for [catalog.UpdateCatalog](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#UpdateCatalog). * Added `StatementId` field for [dashboards.QueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryAttachment). * Added `EffectivePerformanceTarget` field for [jobs.BaseRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseRun). * Added `PerformanceTarget` field for [jobs.CreateJob](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#CreateJob). * Added `PerformanceTarget` field for [jobs.JobSettings](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#JobSettings). * Added `EffectivePerformanceTarget` field for [jobs.Run](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Run). * Added `PerformanceTarget` field for [jobs.RunNow](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunNow). * Added `Disabled` and `EffectivePerformanceTarget` fields for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). * Added `UserAuthorizedScopes` field for [oauth2.CreateCustomAppIntegration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateCustomAppIntegration). * Added `UserAuthorizedScopes` field for [oauth2.GetCustomAppIntegrationOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#GetCustomAppIntegrationOutput). * Added `UserAuthorizedScopes` field for [oauth2.UpdateCustomAppIntegration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateCustomAppIntegration). * Added `Contents` field for [serving.HttpRequestResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#HttpRequestResponse). 
* Changed `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service to type `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service. * Changed `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service to return [serving.HttpRequestResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#HttpRequestResponse). * Removed `SecurableKind` field for [catalog.CatalogInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogInfo). * Removed `SecurableKind` field for [catalog.ConnectionInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ConnectionInfo). * Removed `StatusCode` and `Text` fields for [serving.ExternalFunctionResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalFunctionResponse). 
OpenAPI SHA: c72c58f97b950fcb924a90ef164bcb10cfcd5ece, Date: 2025-02-03 --- .codegen/_openapi_sha | 2 +- CHANGELOG.md | 34 ++ account_client.go | 4 + experimental/mocks/mock_account_client.go | 20 + experimental/mocks/mock_workspace_client.go | 27 + .../billing/mock_budget_policy_interface.go | 478 ++++++++++++++++++ .../mock_lakeview_embedded_interface.go | 131 +++++ .../mock_query_execution_interface.go | 202 ++++++++ .../mock_serving_endpoints_interface.go | 14 +- .../mock_account_settings_interface.go | 47 ++ .../mock_enable_ip_access_lists_interface.go | 214 ++++++++ .../sql/mock_redash_config_interface.go | 95 ++++ service/billing/api.go | 123 ++++- service/billing/impl.go | 57 +++ service/billing/interface.go | 32 ++ service/billing/model.go | 191 +++++++ service/catalog/model.go | 143 +----- service/cleanrooms/api.go | 3 +- service/cleanrooms/interface.go | 5 +- service/compute/model.go | 27 + service/dashboards/api.go | 62 ++- service/dashboards/impl.go | 51 ++ service/dashboards/interface.go | 22 + service/dashboards/model.go | 141 ++++++ service/jobs/model.go | 68 ++- service/oauth2/model.go | 12 + service/pkg.go | 21 +- service/serving/api.go | 2 +- service/serving/impl.go | 10 +- service/serving/interface.go | 2 +- service/serving/model.go | 21 +- service/settings/api.go | 51 +- service/settings/impl.go | 36 ++ service/settings/interface.go | 21 + service/settings/model.go | 314 ++++++++++-- service/sql/api.go | 21 +- service/sql/impl.go | 15 + service/sql/interface.go | 7 + service/sql/model.go | 77 ++- version/version.go | 2 +- workspace_client.go | 13 + 41 files changed, 2585 insertions(+), 233 deletions(-) create mode 100644 experimental/mocks/service/billing/mock_budget_policy_interface.go create mode 100644 experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go create mode 100644 experimental/mocks/service/dashboards/mock_query_execution_interface.go create mode 100644 
experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go create mode 100644 experimental/mocks/service/sql/mock_redash_config_interface.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 588cf9d63..9a95107e8 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -0be1b914249781b5e903b7676fd02255755bc851 \ No newline at end of file +c72c58f97b950fcb924a90ef164bcb10cfcd5ece \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 4a156deb1..28fee5466 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,39 @@ # Version changelog +## [Release] Release v0.57.0 + +### New Features and Improvements + + * Add support for async OAuth token refreshes ([#1135](https://github.com/databricks/databricks-sdk-go/pull/1135)). + + +### API Changes: + + * Added [a.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicyAPI) account-level service. + * Added [a.EnableIpAccessLists](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableIpAccessListsAPI) account-level service. + * Added [w.LakeviewEmbedded](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewEmbeddedAPI) workspace-level service and [w.QueryExecution](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryExecutionAPI) workspace-level service. + * Added [w.RedashConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#RedashConfigAPI) workspace-level service. + * Added `GcpOauthToken` field for [catalog.TemporaryCredentials](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#TemporaryCredentials). + * Added `Options` field for [catalog.UpdateCatalog](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#UpdateCatalog). 
+ * Added `StatementId` field for [dashboards.QueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryAttachment). + * Added `EffectivePerformanceTarget` field for [jobs.BaseRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#BaseRun). + * Added `PerformanceTarget` field for [jobs.CreateJob](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#CreateJob). + * Added `PerformanceTarget` field for [jobs.JobSettings](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#JobSettings). + * Added `EffectivePerformanceTarget` field for [jobs.Run](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Run). + * Added `PerformanceTarget` field for [jobs.RunNow](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunNow). + * Added `Disabled` and `EffectivePerformanceTarget` fields for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). + * Added `UserAuthorizedScopes` field for [oauth2.CreateCustomAppIntegration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateCustomAppIntegration). + * Added `UserAuthorizedScopes` field for [oauth2.GetCustomAppIntegrationOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#GetCustomAppIntegrationOutput). + * Added `UserAuthorizedScopes` field for [oauth2.UpdateCustomAppIntegration](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#UpdateCustomAppIntegration). + * Added `Contents` field for [serving.HttpRequestResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#HttpRequestResponse). 
+ * Changed `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service to type `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service. + * Changed `HttpRequest` method for [w.ServingEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointsAPI) workspace-level service to return [serving.HttpRequestResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#HttpRequestResponse). + * Removed `SecurableKind` field for [catalog.CatalogInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogInfo). + * Removed `SecurableKind` field for [catalog.ConnectionInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ConnectionInfo). + * Removed `StatusCode` and `Text` fields for [serving.ExternalFunctionResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalFunctionResponse). + +OpenAPI SHA: c72c58f97b950fcb924a90ef164bcb10cfcd5ece, Date: 2025-02-03 + ## [Release] Release v0.56.1 ### Bug Fixes diff --git a/account_client.go b/account_client.go index 50632e4f8..fb8aca2e6 100755 --- a/account_client.go +++ b/account_client.go @@ -29,6 +29,9 @@ type AccountClient struct { // account and date range. This feature works with all account types. BillableUsage billing.BillableUsageInterface + // A service serves REST API about Budget policies + BudgetPolicy billing.BudgetPolicyInterface + // These APIs manage credential configurations for this workspace. 
// Databricks needs access to a cross-account service IAM role in your AWS // account so that Databricks can deploy clusters in the appropriate VPC for @@ -414,6 +417,7 @@ func NewAccountClient(c ...*Config) (*AccountClient, error) { AccessControl: iam.NewAccountAccessControl(apiClient), BillableUsage: billing.NewBillableUsage(apiClient), + BudgetPolicy: billing.NewBudgetPolicy(apiClient), Credentials: provisioning.NewCredentials(apiClient), CustomAppIntegration: oauth2.NewCustomAppIntegration(apiClient), EncryptionKeys: provisioning.NewEncryptionKeys(apiClient), diff --git a/experimental/mocks/mock_account_client.go b/experimental/mocks/mock_account_client.go index af59c5a2e..453038368 100755 --- a/experimental/mocks/mock_account_client.go +++ b/experimental/mocks/mock_account_client.go @@ -32,6 +32,7 @@ func NewMockAccountClient(t interface { AccessControl: iam.NewMockAccountAccessControlInterface(t), BillableUsage: billing.NewMockBillableUsageInterface(t), + BudgetPolicy: billing.NewMockBudgetPolicyInterface(t), Credentials: provisioning.NewMockCredentialsInterface(t), CustomAppIntegration: oauth2.NewMockCustomAppIntegrationInterface(t), EncryptionKeys: provisioning.NewMockEncryptionKeysInterface(t), @@ -69,6 +70,9 @@ func NewMockAccountClient(t interface { mockDisableLegacyFeatures := settings.NewMockDisableLegacyFeaturesInterface(t) mockAccountSettingsAPI.On("DisableLegacyFeatures").Return(mockDisableLegacyFeatures).Maybe() + mockEnableIpAccessLists := settings.NewMockEnableIpAccessListsInterface(t) + mockAccountSettingsAPI.On("EnableIpAccessLists").Return(mockEnableIpAccessLists).Maybe() + mockEsmEnablementAccount := settings.NewMockEsmEnablementAccountInterface(t) mockAccountSettingsAPI.On("EsmEnablementAccount").Return(mockEsmEnablementAccount).Maybe() @@ -94,6 +98,14 @@ func (m *MockAccountClient) GetMockDisableLegacyFeaturesAPI() *settings.MockDisa return api } +func (m *MockAccountClient) GetMockEnableIpAccessListsAPI() 
*settings.MockEnableIpAccessListsInterface { + api, ok := m.GetMockAccountSettingsAPI().EnableIpAccessLists().(*settings.MockEnableIpAccessListsInterface) + if !ok { + panic(fmt.Sprintf("expected EnableIpAccessLists to be *settings.MockEnableIpAccessListsInterface, actual was %T", m.GetMockAccountSettingsAPI().EnableIpAccessLists())) + } + return api +} + func (m *MockAccountClient) GetMockEsmEnablementAccountAPI() *settings.MockEsmEnablementAccountInterface { api, ok := m.GetMockAccountSettingsAPI().EsmEnablementAccount().(*settings.MockEsmEnablementAccountInterface) if !ok { @@ -126,6 +138,14 @@ func (m *MockAccountClient) GetMockBillableUsageAPI() *billing.MockBillableUsage return api } +func (m *MockAccountClient) GetMockBudgetPolicyAPI() *billing.MockBudgetPolicyInterface { + api, ok := m.AccountClient.BudgetPolicy.(*billing.MockBudgetPolicyInterface) + if !ok { + panic(fmt.Sprintf("expected BudgetPolicy to be *billing.MockBudgetPolicyInterface, actual was %T", m.AccountClient.BudgetPolicy)) + } + return api +} + func (m *MockAccountClient) GetMockCredentialsAPI() *provisioning.MockCredentialsInterface { api, ok := m.AccountClient.Credentials.(*provisioning.MockCredentialsInterface) if !ok { diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index c46e8663d..ca2bffb53 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -82,6 +82,7 @@ func NewMockWorkspaceClient(t interface { IpAccessLists: settings.NewMockIpAccessListsInterface(t), Jobs: jobs.NewMockJobsInterface(t), Lakeview: dashboards.NewMockLakeviewInterface(t), + LakeviewEmbedded: dashboards.NewMockLakeviewEmbeddedInterface(t), Libraries: compute.NewMockLibrariesInterface(t), Metastores: catalog.NewMockMetastoresInterface(t), ModelRegistry: ml.NewMockModelRegistryInterface(t), @@ -105,11 +106,13 @@ func NewMockWorkspaceClient(t interface { QualityMonitors: 
catalog.NewMockQualityMonitorsInterface(t), Queries: sql.NewMockQueriesInterface(t), QueriesLegacy: sql.NewMockQueriesLegacyInterface(t), + QueryExecution: dashboards.NewMockQueryExecutionInterface(t), QueryHistory: sql.NewMockQueryHistoryInterface(t), QueryVisualizations: sql.NewMockQueryVisualizationsInterface(t), QueryVisualizationsLegacy: sql.NewMockQueryVisualizationsLegacyInterface(t), RecipientActivation: sharing.NewMockRecipientActivationInterface(t), Recipients: sharing.NewMockRecipientsInterface(t), + RedashConfig: sql.NewMockRedashConfigInterface(t), RegisteredModels: catalog.NewMockRegisteredModelsInterface(t), Repos: workspace.NewMockReposInterface(t), ResourceQuotas: catalog.NewMockResourceQuotasInterface(t), @@ -571,6 +574,14 @@ func (m *MockWorkspaceClient) GetMockLakeviewAPI() *dashboards.MockLakeviewInter return api } +func (m *MockWorkspaceClient) GetMockLakeviewEmbeddedAPI() *dashboards.MockLakeviewEmbeddedInterface { + api, ok := m.WorkspaceClient.LakeviewEmbedded.(*dashboards.MockLakeviewEmbeddedInterface) + if !ok { + panic(fmt.Sprintf("expected LakeviewEmbedded to be *dashboards.MockLakeviewEmbeddedInterface, actual was %T", m.WorkspaceClient.LakeviewEmbedded)) + } + return api +} + func (m *MockWorkspaceClient) GetMockLibrariesAPI() *compute.MockLibrariesInterface { api, ok := m.WorkspaceClient.Libraries.(*compute.MockLibrariesInterface) if !ok { @@ -755,6 +766,14 @@ func (m *MockWorkspaceClient) GetMockQueriesLegacyAPI() *sql.MockQueriesLegacyIn return api } +func (m *MockWorkspaceClient) GetMockQueryExecutionAPI() *dashboards.MockQueryExecutionInterface { + api, ok := m.WorkspaceClient.QueryExecution.(*dashboards.MockQueryExecutionInterface) + if !ok { + panic(fmt.Sprintf("expected QueryExecution to be *dashboards.MockQueryExecutionInterface, actual was %T", m.WorkspaceClient.QueryExecution)) + } + return api +} + func (m *MockWorkspaceClient) GetMockQueryHistoryAPI() *sql.MockQueryHistoryInterface { api, ok := 
m.WorkspaceClient.QueryHistory.(*sql.MockQueryHistoryInterface) if !ok { @@ -795,6 +814,14 @@ func (m *MockWorkspaceClient) GetMockRecipientsAPI() *sharing.MockRecipientsInte return api } +func (m *MockWorkspaceClient) GetMockRedashConfigAPI() *sql.MockRedashConfigInterface { + api, ok := m.WorkspaceClient.RedashConfig.(*sql.MockRedashConfigInterface) + if !ok { + panic(fmt.Sprintf("expected RedashConfig to be *sql.MockRedashConfigInterface, actual was %T", m.WorkspaceClient.RedashConfig)) + } + return api +} + func (m *MockWorkspaceClient) GetMockRegisteredModelsAPI() *catalog.MockRegisteredModelsInterface { api, ok := m.WorkspaceClient.RegisteredModels.(*catalog.MockRegisteredModelsInterface) if !ok { diff --git a/experimental/mocks/service/billing/mock_budget_policy_interface.go b/experimental/mocks/service/billing/mock_budget_policy_interface.go new file mode 100644 index 000000000..7028b86fc --- /dev/null +++ b/experimental/mocks/service/billing/mock_budget_policy_interface.go @@ -0,0 +1,478 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. 
+ +package billing + +import ( + context "context" + + billing "github.com/databricks/databricks-sdk-go/service/billing" + + listing "github.com/databricks/databricks-sdk-go/listing" + + mock "github.com/stretchr/testify/mock" +) + +// MockBudgetPolicyInterface is an autogenerated mock type for the BudgetPolicyInterface type +type MockBudgetPolicyInterface struct { + mock.Mock +} + +type MockBudgetPolicyInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockBudgetPolicyInterface) EXPECT() *MockBudgetPolicyInterface_Expecter { + return &MockBudgetPolicyInterface_Expecter{mock: &_m.Mock} +} + +// Create provides a mock function with given fields: ctx, request +func (_m *MockBudgetPolicyInterface) Create(ctx context.Context, request billing.CreateBudgetPolicyRequest) (*billing.BudgetPolicy, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Create") + } + + var r0 *billing.BudgetPolicy + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, billing.CreateBudgetPolicyRequest) (*billing.BudgetPolicy, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, billing.CreateBudgetPolicyRequest) *billing.BudgetPolicy); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*billing.BudgetPolicy) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, billing.CreateBudgetPolicyRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBudgetPolicyInterface_Create_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Create' +type MockBudgetPolicyInterface_Create_Call struct { + *mock.Call +} + +// Create is a helper method to define mock.On call +// - ctx context.Context +// - request billing.CreateBudgetPolicyRequest +func (_e *MockBudgetPolicyInterface_Expecter) Create(ctx interface{}, request interface{}) *MockBudgetPolicyInterface_Create_Call { 
+ return &MockBudgetPolicyInterface_Create_Call{Call: _e.mock.On("Create", ctx, request)} +} + +func (_c *MockBudgetPolicyInterface_Create_Call) Run(run func(ctx context.Context, request billing.CreateBudgetPolicyRequest)) *MockBudgetPolicyInterface_Create_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(billing.CreateBudgetPolicyRequest)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_Create_Call) Return(_a0 *billing.BudgetPolicy, _a1 error) *MockBudgetPolicyInterface_Create_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBudgetPolicyInterface_Create_Call) RunAndReturn(run func(context.Context, billing.CreateBudgetPolicyRequest) (*billing.BudgetPolicy, error)) *MockBudgetPolicyInterface_Create_Call { + _c.Call.Return(run) + return _c +} + +// Delete provides a mock function with given fields: ctx, request +func (_m *MockBudgetPolicyInterface) Delete(ctx context.Context, request billing.DeleteBudgetPolicyRequest) error { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, billing.DeleteBudgetPolicyRequest) error); ok { + r0 = rf(ctx, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBudgetPolicyInterface_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type MockBudgetPolicyInterface_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - request billing.DeleteBudgetPolicyRequest +func (_e *MockBudgetPolicyInterface_Expecter) Delete(ctx interface{}, request interface{}) *MockBudgetPolicyInterface_Delete_Call { + return &MockBudgetPolicyInterface_Delete_Call{Call: _e.mock.On("Delete", ctx, request)} +} + +func (_c *MockBudgetPolicyInterface_Delete_Call) Run(run func(ctx context.Context, request billing.DeleteBudgetPolicyRequest)) 
*MockBudgetPolicyInterface_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(billing.DeleteBudgetPolicyRequest)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_Delete_Call) Return(_a0 error) *MockBudgetPolicyInterface_Delete_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBudgetPolicyInterface_Delete_Call) RunAndReturn(run func(context.Context, billing.DeleteBudgetPolicyRequest) error) *MockBudgetPolicyInterface_Delete_Call { + _c.Call.Return(run) + return _c +} + +// DeleteByPolicyId provides a mock function with given fields: ctx, policyId +func (_m *MockBudgetPolicyInterface) DeleteByPolicyId(ctx context.Context, policyId string) error { + ret := _m.Called(ctx, policyId) + + if len(ret) == 0 { + panic("no return value specified for DeleteByPolicyId") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, policyId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockBudgetPolicyInterface_DeleteByPolicyId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteByPolicyId' +type MockBudgetPolicyInterface_DeleteByPolicyId_Call struct { + *mock.Call +} + +// DeleteByPolicyId is a helper method to define mock.On call +// - ctx context.Context +// - policyId string +func (_e *MockBudgetPolicyInterface_Expecter) DeleteByPolicyId(ctx interface{}, policyId interface{}) *MockBudgetPolicyInterface_DeleteByPolicyId_Call { + return &MockBudgetPolicyInterface_DeleteByPolicyId_Call{Call: _e.mock.On("DeleteByPolicyId", ctx, policyId)} +} + +func (_c *MockBudgetPolicyInterface_DeleteByPolicyId_Call) Run(run func(ctx context.Context, policyId string)) *MockBudgetPolicyInterface_DeleteByPolicyId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_DeleteByPolicyId_Call) Return(_a0 error) 
*MockBudgetPolicyInterface_DeleteByPolicyId_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBudgetPolicyInterface_DeleteByPolicyId_Call) RunAndReturn(run func(context.Context, string) error) *MockBudgetPolicyInterface_DeleteByPolicyId_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, request +func (_m *MockBudgetPolicyInterface) Get(ctx context.Context, request billing.GetBudgetPolicyRequest) (*billing.BudgetPolicy, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *billing.BudgetPolicy + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, billing.GetBudgetPolicyRequest) (*billing.BudgetPolicy, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, billing.GetBudgetPolicyRequest) *billing.BudgetPolicy); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*billing.BudgetPolicy) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, billing.GetBudgetPolicyRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBudgetPolicyInterface_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type MockBudgetPolicyInterface_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - request billing.GetBudgetPolicyRequest +func (_e *MockBudgetPolicyInterface_Expecter) Get(ctx interface{}, request interface{}) *MockBudgetPolicyInterface_Get_Call { + return &MockBudgetPolicyInterface_Get_Call{Call: _e.mock.On("Get", ctx, request)} +} + +func (_c *MockBudgetPolicyInterface_Get_Call) Run(run func(ctx context.Context, request billing.GetBudgetPolicyRequest)) *MockBudgetPolicyInterface_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), 
args[1].(billing.GetBudgetPolicyRequest)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_Get_Call) Return(_a0 *billing.BudgetPolicy, _a1 error) *MockBudgetPolicyInterface_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBudgetPolicyInterface_Get_Call) RunAndReturn(run func(context.Context, billing.GetBudgetPolicyRequest) (*billing.BudgetPolicy, error)) *MockBudgetPolicyInterface_Get_Call { + _c.Call.Return(run) + return _c +} + +// GetByPolicyId provides a mock function with given fields: ctx, policyId +func (_m *MockBudgetPolicyInterface) GetByPolicyId(ctx context.Context, policyId string) (*billing.BudgetPolicy, error) { + ret := _m.Called(ctx, policyId) + + if len(ret) == 0 { + panic("no return value specified for GetByPolicyId") + } + + var r0 *billing.BudgetPolicy + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*billing.BudgetPolicy, error)); ok { + return rf(ctx, policyId) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *billing.BudgetPolicy); ok { + r0 = rf(ctx, policyId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*billing.BudgetPolicy) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, policyId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBudgetPolicyInterface_GetByPolicyId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetByPolicyId' +type MockBudgetPolicyInterface_GetByPolicyId_Call struct { + *mock.Call +} + +// GetByPolicyId is a helper method to define mock.On call +// - ctx context.Context +// - policyId string +func (_e *MockBudgetPolicyInterface_Expecter) GetByPolicyId(ctx interface{}, policyId interface{}) *MockBudgetPolicyInterface_GetByPolicyId_Call { + return &MockBudgetPolicyInterface_GetByPolicyId_Call{Call: _e.mock.On("GetByPolicyId", ctx, policyId)} +} + +func (_c *MockBudgetPolicyInterface_GetByPolicyId_Call) Run(run func(ctx context.Context, 
policyId string)) *MockBudgetPolicyInterface_GetByPolicyId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_GetByPolicyId_Call) Return(_a0 *billing.BudgetPolicy, _a1 error) *MockBudgetPolicyInterface_GetByPolicyId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBudgetPolicyInterface_GetByPolicyId_Call) RunAndReturn(run func(context.Context, string) (*billing.BudgetPolicy, error)) *MockBudgetPolicyInterface_GetByPolicyId_Call { + _c.Call.Return(run) + return _c +} + +// List provides a mock function with given fields: ctx, request +func (_m *MockBudgetPolicyInterface) List(ctx context.Context, request billing.ListBudgetPoliciesRequest) listing.Iterator[billing.BudgetPolicy] { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for List") + } + + var r0 listing.Iterator[billing.BudgetPolicy] + if rf, ok := ret.Get(0).(func(context.Context, billing.ListBudgetPoliciesRequest) listing.Iterator[billing.BudgetPolicy]); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(listing.Iterator[billing.BudgetPolicy]) + } + } + + return r0 +} + +// MockBudgetPolicyInterface_List_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'List' +type MockBudgetPolicyInterface_List_Call struct { + *mock.Call +} + +// List is a helper method to define mock.On call +// - ctx context.Context +// - request billing.ListBudgetPoliciesRequest +func (_e *MockBudgetPolicyInterface_Expecter) List(ctx interface{}, request interface{}) *MockBudgetPolicyInterface_List_Call { + return &MockBudgetPolicyInterface_List_Call{Call: _e.mock.On("List", ctx, request)} +} + +func (_c *MockBudgetPolicyInterface_List_Call) Run(run func(ctx context.Context, request billing.ListBudgetPoliciesRequest)) *MockBudgetPolicyInterface_List_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(billing.ListBudgetPoliciesRequest)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_List_Call) Return(_a0 listing.Iterator[billing.BudgetPolicy]) *MockBudgetPolicyInterface_List_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockBudgetPolicyInterface_List_Call) RunAndReturn(run func(context.Context, billing.ListBudgetPoliciesRequest) listing.Iterator[billing.BudgetPolicy]) *MockBudgetPolicyInterface_List_Call { + _c.Call.Return(run) + return _c +} + +// ListAll provides a mock function with given fields: ctx, request +func (_m *MockBudgetPolicyInterface) ListAll(ctx context.Context, request billing.ListBudgetPoliciesRequest) ([]billing.BudgetPolicy, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAll") + } + + var r0 []billing.BudgetPolicy + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, billing.ListBudgetPoliciesRequest) ([]billing.BudgetPolicy, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, billing.ListBudgetPoliciesRequest) []billing.BudgetPolicy); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]billing.BudgetPolicy) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, billing.ListBudgetPoliciesRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBudgetPolicyInterface_ListAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAll' +type MockBudgetPolicyInterface_ListAll_Call struct { + *mock.Call +} + +// ListAll is a helper method to define mock.On call +// - ctx context.Context +// - request billing.ListBudgetPoliciesRequest +func (_e *MockBudgetPolicyInterface_Expecter) ListAll(ctx interface{}, request interface{}) *MockBudgetPolicyInterface_ListAll_Call { + return &MockBudgetPolicyInterface_ListAll_Call{Call: _e.mock.On("ListAll", 
ctx, request)} +} + +func (_c *MockBudgetPolicyInterface_ListAll_Call) Run(run func(ctx context.Context, request billing.ListBudgetPoliciesRequest)) *MockBudgetPolicyInterface_ListAll_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(billing.ListBudgetPoliciesRequest)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_ListAll_Call) Return(_a0 []billing.BudgetPolicy, _a1 error) *MockBudgetPolicyInterface_ListAll_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBudgetPolicyInterface_ListAll_Call) RunAndReturn(run func(context.Context, billing.ListBudgetPoliciesRequest) ([]billing.BudgetPolicy, error)) *MockBudgetPolicyInterface_ListAll_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock function with given fields: ctx, request +func (_m *MockBudgetPolicyInterface) Update(ctx context.Context, request billing.UpdateBudgetPolicyRequest) (*billing.BudgetPolicy, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 *billing.BudgetPolicy + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, billing.UpdateBudgetPolicyRequest) (*billing.BudgetPolicy, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, billing.UpdateBudgetPolicyRequest) *billing.BudgetPolicy); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*billing.BudgetPolicy) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, billing.UpdateBudgetPolicyRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockBudgetPolicyInterface_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type MockBudgetPolicyInterface_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - ctx context.Context +// - request 
billing.UpdateBudgetPolicyRequest +func (_e *MockBudgetPolicyInterface_Expecter) Update(ctx interface{}, request interface{}) *MockBudgetPolicyInterface_Update_Call { + return &MockBudgetPolicyInterface_Update_Call{Call: _e.mock.On("Update", ctx, request)} +} + +func (_c *MockBudgetPolicyInterface_Update_Call) Run(run func(ctx context.Context, request billing.UpdateBudgetPolicyRequest)) *MockBudgetPolicyInterface_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(billing.UpdateBudgetPolicyRequest)) + }) + return _c +} + +func (_c *MockBudgetPolicyInterface_Update_Call) Return(_a0 *billing.BudgetPolicy, _a1 error) *MockBudgetPolicyInterface_Update_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockBudgetPolicyInterface_Update_Call) RunAndReturn(run func(context.Context, billing.UpdateBudgetPolicyRequest) (*billing.BudgetPolicy, error)) *MockBudgetPolicyInterface_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewMockBudgetPolicyInterface creates a new instance of MockBudgetPolicyInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockBudgetPolicyInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockBudgetPolicyInterface { + mock := &MockBudgetPolicyInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go new file mode 100644 index 000000000..d606527b0 --- /dev/null +++ b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go @@ -0,0 +1,131 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. 
+ +package dashboards + +import ( + context "context" + + dashboards "github.com/databricks/databricks-sdk-go/service/dashboards" + mock "github.com/stretchr/testify/mock" +) + +// MockLakeviewEmbeddedInterface is an autogenerated mock type for the LakeviewEmbeddedInterface type +type MockLakeviewEmbeddedInterface struct { + mock.Mock +} + +type MockLakeviewEmbeddedInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockLakeviewEmbeddedInterface) EXPECT() *MockLakeviewEmbeddedInterface_Expecter { + return &MockLakeviewEmbeddedInterface_Expecter{mock: &_m.Mock} +} + +// GetPublishedDashboardEmbedded provides a mock function with given fields: ctx, request +func (_m *MockLakeviewEmbeddedInterface) GetPublishedDashboardEmbedded(ctx context.Context, request dashboards.GetPublishedDashboardEmbeddedRequest) error { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetPublishedDashboardEmbedded") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GetPublishedDashboardEmbeddedRequest) error); ok { + r0 = rf(ctx, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPublishedDashboardEmbedded' +type MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call struct { + *mock.Call +} + +// GetPublishedDashboardEmbedded is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GetPublishedDashboardEmbeddedRequest +func (_e *MockLakeviewEmbeddedInterface_Expecter) GetPublishedDashboardEmbedded(ctx interface{}, request interface{}) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call { + return &MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call{Call: _e.mock.On("GetPublishedDashboardEmbedded", ctx, request)} +} + +func (_c 
*MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call) Run(run func(ctx context.Context, request dashboards.GetPublishedDashboardEmbeddedRequest)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GetPublishedDashboardEmbeddedRequest)) + }) + return _c +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call) Return(_a0 error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call) RunAndReturn(run func(context.Context, dashboards.GetPublishedDashboardEmbeddedRequest) error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbedded_Call { + _c.Call.Return(run) + return _c +} + +// GetPublishedDashboardEmbeddedByDashboardId provides a mock function with given fields: ctx, dashboardId +func (_m *MockLakeviewEmbeddedInterface) GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error { + ret := _m.Called(ctx, dashboardId) + + if len(ret) == 0 { + panic("no return value specified for GetPublishedDashboardEmbeddedByDashboardId") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, dashboardId) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPublishedDashboardEmbeddedByDashboardId' +type MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call struct { + *mock.Call +} + +// GetPublishedDashboardEmbeddedByDashboardId is a helper method to define mock.On call +// - ctx context.Context +// - dashboardId string +func (_e *MockLakeviewEmbeddedInterface_Expecter) GetPublishedDashboardEmbeddedByDashboardId(ctx interface{}, 
dashboardId interface{}) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call { + return &MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call{Call: _e.mock.On("GetPublishedDashboardEmbeddedByDashboardId", ctx, dashboardId)} +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call) Run(run func(ctx context.Context, dashboardId string)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call) Return(_a0 error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call) RunAndReturn(run func(context.Context, string) error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboardId_Call { + _c.Call.Return(run) + return _c +} + +// NewMockLakeviewEmbeddedInterface creates a new instance of MockLakeviewEmbeddedInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockLakeviewEmbeddedInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockLakeviewEmbeddedInterface { + mock := &MockLakeviewEmbeddedInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/dashboards/mock_query_execution_interface.go b/experimental/mocks/service/dashboards/mock_query_execution_interface.go new file mode 100644 index 000000000..e3c3b6e90 --- /dev/null +++ b/experimental/mocks/service/dashboards/mock_query_execution_interface.go @@ -0,0 +1,202 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. 
+ +package dashboards + +import ( + context "context" + + dashboards "github.com/databricks/databricks-sdk-go/service/dashboards" + mock "github.com/stretchr/testify/mock" +) + +// MockQueryExecutionInterface is an autogenerated mock type for the QueryExecutionInterface type +type MockQueryExecutionInterface struct { + mock.Mock +} + +type MockQueryExecutionInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockQueryExecutionInterface) EXPECT() *MockQueryExecutionInterface_Expecter { + return &MockQueryExecutionInterface_Expecter{mock: &_m.Mock} +} + +// CancelPublishedQueryExecution provides a mock function with given fields: ctx, request +func (_m *MockQueryExecutionInterface) CancelPublishedQueryExecution(ctx context.Context, request dashboards.CancelPublishedQueryExecutionRequest) (*dashboards.CancelQueryExecutionResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for CancelPublishedQueryExecution") + } + + var r0 *dashboards.CancelQueryExecutionResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.CancelPublishedQueryExecutionRequest) (*dashboards.CancelQueryExecutionResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.CancelPublishedQueryExecutionRequest) *dashboards.CancelQueryExecutionResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.CancelQueryExecutionResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.CancelPublishedQueryExecutionRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQueryExecutionInterface_CancelPublishedQueryExecution_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CancelPublishedQueryExecution' +type MockQueryExecutionInterface_CancelPublishedQueryExecution_Call struct { + *mock.Call +} + +// 
CancelPublishedQueryExecution is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.CancelPublishedQueryExecutionRequest +func (_e *MockQueryExecutionInterface_Expecter) CancelPublishedQueryExecution(ctx interface{}, request interface{}) *MockQueryExecutionInterface_CancelPublishedQueryExecution_Call { + return &MockQueryExecutionInterface_CancelPublishedQueryExecution_Call{Call: _e.mock.On("CancelPublishedQueryExecution", ctx, request)} +} + +func (_c *MockQueryExecutionInterface_CancelPublishedQueryExecution_Call) Run(run func(ctx context.Context, request dashboards.CancelPublishedQueryExecutionRequest)) *MockQueryExecutionInterface_CancelPublishedQueryExecution_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.CancelPublishedQueryExecutionRequest)) + }) + return _c +} + +func (_c *MockQueryExecutionInterface_CancelPublishedQueryExecution_Call) Return(_a0 *dashboards.CancelQueryExecutionResponse, _a1 error) *MockQueryExecutionInterface_CancelPublishedQueryExecution_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQueryExecutionInterface_CancelPublishedQueryExecution_Call) RunAndReturn(run func(context.Context, dashboards.CancelPublishedQueryExecutionRequest) (*dashboards.CancelQueryExecutionResponse, error)) *MockQueryExecutionInterface_CancelPublishedQueryExecution_Call { + _c.Call.Return(run) + return _c +} + +// ExecutePublishedDashboardQuery provides a mock function with given fields: ctx, request +func (_m *MockQueryExecutionInterface) ExecutePublishedDashboardQuery(ctx context.Context, request dashboards.ExecutePublishedDashboardQueryRequest) error { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ExecutePublishedDashboardQuery") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.ExecutePublishedDashboardQueryRequest) error); ok { + r0 = rf(ctx, request) + } else { + r0 = 
ret.Error(0) + } + + return r0 +} + +// MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecutePublishedDashboardQuery' +type MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call struct { + *mock.Call +} + +// ExecutePublishedDashboardQuery is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.ExecutePublishedDashboardQueryRequest +func (_e *MockQueryExecutionInterface_Expecter) ExecutePublishedDashboardQuery(ctx interface{}, request interface{}) *MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call { + return &MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call{Call: _e.mock.On("ExecutePublishedDashboardQuery", ctx, request)} +} + +func (_c *MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call) Run(run func(ctx context.Context, request dashboards.ExecutePublishedDashboardQueryRequest)) *MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.ExecutePublishedDashboardQueryRequest)) + }) + return _c +} + +func (_c *MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call) Return(_a0 error) *MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call) RunAndReturn(run func(context.Context, dashboards.ExecutePublishedDashboardQueryRequest) error) *MockQueryExecutionInterface_ExecutePublishedDashboardQuery_Call { + _c.Call.Return(run) + return _c +} + +// PollPublishedQueryStatus provides a mock function with given fields: ctx, request +func (_m *MockQueryExecutionInterface) PollPublishedQueryStatus(ctx context.Context, request dashboards.PollPublishedQueryStatusRequest) (*dashboards.PollQueryStatusResponse, error) { + ret := _m.Called(ctx, request) + + if 
len(ret) == 0 { + panic("no return value specified for PollPublishedQueryStatus") + } + + var r0 *dashboards.PollQueryStatusResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.PollPublishedQueryStatusRequest) (*dashboards.PollQueryStatusResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.PollPublishedQueryStatusRequest) *dashboards.PollQueryStatusResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.PollQueryStatusResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.PollPublishedQueryStatusRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockQueryExecutionInterface_PollPublishedQueryStatus_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PollPublishedQueryStatus' +type MockQueryExecutionInterface_PollPublishedQueryStatus_Call struct { + *mock.Call +} + +// PollPublishedQueryStatus is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.PollPublishedQueryStatusRequest +func (_e *MockQueryExecutionInterface_Expecter) PollPublishedQueryStatus(ctx interface{}, request interface{}) *MockQueryExecutionInterface_PollPublishedQueryStatus_Call { + return &MockQueryExecutionInterface_PollPublishedQueryStatus_Call{Call: _e.mock.On("PollPublishedQueryStatus", ctx, request)} +} + +func (_c *MockQueryExecutionInterface_PollPublishedQueryStatus_Call) Run(run func(ctx context.Context, request dashboards.PollPublishedQueryStatusRequest)) *MockQueryExecutionInterface_PollPublishedQueryStatus_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.PollPublishedQueryStatusRequest)) + }) + return _c +} + +func (_c *MockQueryExecutionInterface_PollPublishedQueryStatus_Call) Return(_a0 *dashboards.PollQueryStatusResponse, _a1 error) 
*MockQueryExecutionInterface_PollPublishedQueryStatus_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockQueryExecutionInterface_PollPublishedQueryStatus_Call) RunAndReturn(run func(context.Context, dashboards.PollPublishedQueryStatusRequest) (*dashboards.PollQueryStatusResponse, error)) *MockQueryExecutionInterface_PollPublishedQueryStatus_Call { + _c.Call.Return(run) + return _c +} + +// NewMockQueryExecutionInterface creates a new instance of MockQueryExecutionInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockQueryExecutionInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockQueryExecutionInterface { + mock := &MockQueryExecutionInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go index 84c106815..a4cdcf157 100644 --- a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go +++ b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go @@ -965,23 +965,23 @@ func (_c *MockServingEndpointsInterface_GetPermissionsByServingEndpointId_Call) } // HttpRequest provides a mock function with given fields: ctx, request -func (_m *MockServingEndpointsInterface) HttpRequest(ctx context.Context, request serving.ExternalFunctionRequest) (*serving.ExternalFunctionResponse, error) { +func (_m *MockServingEndpointsInterface) HttpRequest(ctx context.Context, request serving.ExternalFunctionRequest) (*serving.HttpRequestResponse, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for HttpRequest") } - var r0 *serving.ExternalFunctionResponse + var r0 *serving.HttpRequestResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, 
serving.ExternalFunctionRequest) (*serving.ExternalFunctionResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, serving.ExternalFunctionRequest) (*serving.HttpRequestResponse, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, serving.ExternalFunctionRequest) *serving.ExternalFunctionResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, serving.ExternalFunctionRequest) *serving.HttpRequestResponse); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*serving.ExternalFunctionResponse) + r0 = ret.Get(0).(*serving.HttpRequestResponse) } } @@ -1013,12 +1013,12 @@ func (_c *MockServingEndpointsInterface_HttpRequest_Call) Run(run func(ctx conte return _c } -func (_c *MockServingEndpointsInterface_HttpRequest_Call) Return(_a0 *serving.ExternalFunctionResponse, _a1 error) *MockServingEndpointsInterface_HttpRequest_Call { +func (_c *MockServingEndpointsInterface_HttpRequest_Call) Return(_a0 *serving.HttpRequestResponse, _a1 error) *MockServingEndpointsInterface_HttpRequest_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockServingEndpointsInterface_HttpRequest_Call) RunAndReturn(run func(context.Context, serving.ExternalFunctionRequest) (*serving.ExternalFunctionResponse, error)) *MockServingEndpointsInterface_HttpRequest_Call { +func (_c *MockServingEndpointsInterface_HttpRequest_Call) RunAndReturn(run func(context.Context, serving.ExternalFunctionRequest) (*serving.HttpRequestResponse, error)) *MockServingEndpointsInterface_HttpRequest_Call { _c.Call.Return(run) return _c } diff --git a/experimental/mocks/service/settings/mock_account_settings_interface.go b/experimental/mocks/service/settings/mock_account_settings_interface.go index 978c4cc7e..82c9a3546 100644 --- a/experimental/mocks/service/settings/mock_account_settings_interface.go +++ b/experimental/mocks/service/settings/mock_account_settings_interface.go @@ -114,6 +114,53 @@ func (_c 
*MockAccountSettingsInterface_DisableLegacyFeatures_Call) RunAndReturn( return _c } +// EnableIpAccessLists provides a mock function with given fields: +func (_m *MockAccountSettingsInterface) EnableIpAccessLists() settings.EnableIpAccessListsInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnableIpAccessLists") + } + + var r0 settings.EnableIpAccessListsInterface + if rf, ok := ret.Get(0).(func() settings.EnableIpAccessListsInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.EnableIpAccessListsInterface) + } + } + + return r0 +} + +// MockAccountSettingsInterface_EnableIpAccessLists_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableIpAccessLists' +type MockAccountSettingsInterface_EnableIpAccessLists_Call struct { + *mock.Call +} + +// EnableIpAccessLists is a helper method to define mock.On call +func (_e *MockAccountSettingsInterface_Expecter) EnableIpAccessLists() *MockAccountSettingsInterface_EnableIpAccessLists_Call { + return &MockAccountSettingsInterface_EnableIpAccessLists_Call{Call: _e.mock.On("EnableIpAccessLists")} +} + +func (_c *MockAccountSettingsInterface_EnableIpAccessLists_Call) Run(run func()) *MockAccountSettingsInterface_EnableIpAccessLists_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockAccountSettingsInterface_EnableIpAccessLists_Call) Return(_a0 settings.EnableIpAccessListsInterface) *MockAccountSettingsInterface_EnableIpAccessLists_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockAccountSettingsInterface_EnableIpAccessLists_Call) RunAndReturn(run func() settings.EnableIpAccessListsInterface) *MockAccountSettingsInterface_EnableIpAccessLists_Call { + _c.Call.Return(run) + return _c +} + // EsmEnablementAccount provides a mock function with given fields: func (_m *MockAccountSettingsInterface) EsmEnablementAccount() 
settings.EsmEnablementAccountInterface { ret := _m.Called() diff --git a/experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go b/experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go new file mode 100644 index 000000000..54868cbe8 --- /dev/null +++ b/experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go @@ -0,0 +1,214 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. + +package settings + +import ( + context "context" + + settings "github.com/databricks/databricks-sdk-go/service/settings" + mock "github.com/stretchr/testify/mock" +) + +// MockEnableIpAccessListsInterface is an autogenerated mock type for the EnableIpAccessListsInterface type +type MockEnableIpAccessListsInterface struct { + mock.Mock +} + +type MockEnableIpAccessListsInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockEnableIpAccessListsInterface) EXPECT() *MockEnableIpAccessListsInterface_Expecter { + return &MockEnableIpAccessListsInterface_Expecter{mock: &_m.Mock} +} + +// Delete provides a mock function with given fields: ctx, request +func (_m *MockEnableIpAccessListsInterface) Delete(ctx context.Context, request settings.DeleteAccountIpAccessEnableRequest) (*settings.DeleteAccountIpAccessEnableResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Delete") + } + + var r0 *settings.DeleteAccountIpAccessEnableResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.DeleteAccountIpAccessEnableRequest) (*settings.DeleteAccountIpAccessEnableResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.DeleteAccountIpAccessEnableRequest) *settings.DeleteAccountIpAccessEnableResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.DeleteAccountIpAccessEnableResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, 
settings.DeleteAccountIpAccessEnableRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableIpAccessListsInterface_Delete_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Delete' +type MockEnableIpAccessListsInterface_Delete_Call struct { + *mock.Call +} + +// Delete is a helper method to define mock.On call +// - ctx context.Context +// - request settings.DeleteAccountIpAccessEnableRequest +func (_e *MockEnableIpAccessListsInterface_Expecter) Delete(ctx interface{}, request interface{}) *MockEnableIpAccessListsInterface_Delete_Call { + return &MockEnableIpAccessListsInterface_Delete_Call{Call: _e.mock.On("Delete", ctx, request)} +} + +func (_c *MockEnableIpAccessListsInterface_Delete_Call) Run(run func(ctx context.Context, request settings.DeleteAccountIpAccessEnableRequest)) *MockEnableIpAccessListsInterface_Delete_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.DeleteAccountIpAccessEnableRequest)) + }) + return _c +} + +func (_c *MockEnableIpAccessListsInterface_Delete_Call) Return(_a0 *settings.DeleteAccountIpAccessEnableResponse, _a1 error) *MockEnableIpAccessListsInterface_Delete_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableIpAccessListsInterface_Delete_Call) RunAndReturn(run func(context.Context, settings.DeleteAccountIpAccessEnableRequest) (*settings.DeleteAccountIpAccessEnableResponse, error)) *MockEnableIpAccessListsInterface_Delete_Call { + _c.Call.Return(run) + return _c +} + +// Get provides a mock function with given fields: ctx, request +func (_m *MockEnableIpAccessListsInterface) Get(ctx context.Context, request settings.GetAccountIpAccessEnableRequest) (*settings.AccountIpAccessEnable, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Get") + } + + var r0 *settings.AccountIpAccessEnable + var r1 error + if rf, ok 
:= ret.Get(0).(func(context.Context, settings.GetAccountIpAccessEnableRequest) (*settings.AccountIpAccessEnable, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.GetAccountIpAccessEnableRequest) *settings.AccountIpAccessEnable); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.AccountIpAccessEnable) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.GetAccountIpAccessEnableRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableIpAccessListsInterface_Get_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Get' +type MockEnableIpAccessListsInterface_Get_Call struct { + *mock.Call +} + +// Get is a helper method to define mock.On call +// - ctx context.Context +// - request settings.GetAccountIpAccessEnableRequest +func (_e *MockEnableIpAccessListsInterface_Expecter) Get(ctx interface{}, request interface{}) *MockEnableIpAccessListsInterface_Get_Call { + return &MockEnableIpAccessListsInterface_Get_Call{Call: _e.mock.On("Get", ctx, request)} +} + +func (_c *MockEnableIpAccessListsInterface_Get_Call) Run(run func(ctx context.Context, request settings.GetAccountIpAccessEnableRequest)) *MockEnableIpAccessListsInterface_Get_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.GetAccountIpAccessEnableRequest)) + }) + return _c +} + +func (_c *MockEnableIpAccessListsInterface_Get_Call) Return(_a0 *settings.AccountIpAccessEnable, _a1 error) *MockEnableIpAccessListsInterface_Get_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableIpAccessListsInterface_Get_Call) RunAndReturn(run func(context.Context, settings.GetAccountIpAccessEnableRequest) (*settings.AccountIpAccessEnable, error)) *MockEnableIpAccessListsInterface_Get_Call { + _c.Call.Return(run) + return _c +} + +// Update provides a mock 
function with given fields: ctx, request +func (_m *MockEnableIpAccessListsInterface) Update(ctx context.Context, request settings.UpdateAccountIpAccessEnableRequest) (*settings.AccountIpAccessEnable, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for Update") + } + + var r0 *settings.AccountIpAccessEnable + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateAccountIpAccessEnableRequest) (*settings.AccountIpAccessEnable, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateAccountIpAccessEnableRequest) *settings.AccountIpAccessEnable); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.AccountIpAccessEnable) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateAccountIpAccessEnableRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableIpAccessListsInterface_Update_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'Update' +type MockEnableIpAccessListsInterface_Update_Call struct { + *mock.Call +} + +// Update is a helper method to define mock.On call +// - ctx context.Context +// - request settings.UpdateAccountIpAccessEnableRequest +func (_e *MockEnableIpAccessListsInterface_Expecter) Update(ctx interface{}, request interface{}) *MockEnableIpAccessListsInterface_Update_Call { + return &MockEnableIpAccessListsInterface_Update_Call{Call: _e.mock.On("Update", ctx, request)} +} + +func (_c *MockEnableIpAccessListsInterface_Update_Call) Run(run func(ctx context.Context, request settings.UpdateAccountIpAccessEnableRequest)) *MockEnableIpAccessListsInterface_Update_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateAccountIpAccessEnableRequest)) + }) + return _c +} + +func (_c 
*MockEnableIpAccessListsInterface_Update_Call) Return(_a0 *settings.AccountIpAccessEnable, _a1 error) *MockEnableIpAccessListsInterface_Update_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableIpAccessListsInterface_Update_Call) RunAndReturn(run func(context.Context, settings.UpdateAccountIpAccessEnableRequest) (*settings.AccountIpAccessEnable, error)) *MockEnableIpAccessListsInterface_Update_Call { + _c.Call.Return(run) + return _c +} + +// NewMockEnableIpAccessListsInterface creates a new instance of MockEnableIpAccessListsInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockEnableIpAccessListsInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockEnableIpAccessListsInterface { + mock := &MockEnableIpAccessListsInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/sql/mock_redash_config_interface.go b/experimental/mocks/service/sql/mock_redash_config_interface.go new file mode 100644 index 000000000..e96d5957d --- /dev/null +++ b/experimental/mocks/service/sql/mock_redash_config_interface.go @@ -0,0 +1,95 @@ +// Code generated by mockery v2.43.0. DO NOT EDIT. 
+ +package sql + +import ( + context "context" + + sql "github.com/databricks/databricks-sdk-go/service/sql" + mock "github.com/stretchr/testify/mock" +) + +// MockRedashConfigInterface is an autogenerated mock type for the RedashConfigInterface type +type MockRedashConfigInterface struct { + mock.Mock +} + +type MockRedashConfigInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockRedashConfigInterface) EXPECT() *MockRedashConfigInterface_Expecter { + return &MockRedashConfigInterface_Expecter{mock: &_m.Mock} +} + +// GetConfig provides a mock function with given fields: ctx +func (_m *MockRedashConfigInterface) GetConfig(ctx context.Context) (*sql.ClientConfig, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetConfig") + } + + var r0 *sql.ClientConfig + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*sql.ClientConfig, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *sql.ClientConfig); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.ClientConfig) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockRedashConfigInterface_GetConfig_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetConfig' +type MockRedashConfigInterface_GetConfig_Call struct { + *mock.Call +} + +// GetConfig is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockRedashConfigInterface_Expecter) GetConfig(ctx interface{}) *MockRedashConfigInterface_GetConfig_Call { + return &MockRedashConfigInterface_GetConfig_Call{Call: _e.mock.On("GetConfig", ctx)} +} + +func (_c *MockRedashConfigInterface_GetConfig_Call) Run(run func(ctx context.Context)) *MockRedashConfigInterface_GetConfig_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c 
*MockRedashConfigInterface_GetConfig_Call) Return(_a0 *sql.ClientConfig, _a1 error) *MockRedashConfigInterface_GetConfig_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockRedashConfigInterface_GetConfig_Call) RunAndReturn(run func(context.Context) (*sql.ClientConfig, error)) *MockRedashConfigInterface_GetConfig_Call { + _c.Call.Return(run) + return _c +} + +// NewMockRedashConfigInterface creates a new instance of MockRedashConfigInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockRedashConfigInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockRedashConfigInterface { + mock := &MockRedashConfigInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/service/billing/api.go b/service/billing/api.go index 83e8daff2..ae62bb149 100755 --- a/service/billing/api.go +++ b/service/billing/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Billable Usage, Budgets, Log Delivery, Usage Dashboards, etc. +// These APIs allow you to manage Billable Usage, Budget Policy, Budgets, Log Delivery, Usage Dashboards, etc. package billing import ( @@ -43,6 +43,127 @@ type BillableUsageAPI struct { billableUsageImpl } +type BudgetPolicyInterface interface { + + // Create a budget policy. + // + // Creates a new policy. + Create(ctx context.Context, request CreateBudgetPolicyRequest) (*BudgetPolicy, error) + + // Delete a budget policy. + // + // Deletes a policy + Delete(ctx context.Context, request DeleteBudgetPolicyRequest) error + + // Delete a budget policy. + // + // Deletes a policy + DeleteByPolicyId(ctx context.Context, policyId string) error + + // Get a budget policy. + // + // Retrieves a policy by it's ID. 
+ Get(ctx context.Context, request GetBudgetPolicyRequest) (*BudgetPolicy, error) + + // Get a budget policy. + // + // Retrieves a policy by it's ID. + GetByPolicyId(ctx context.Context, policyId string) (*BudgetPolicy, error) + + // List policies. + // + // Lists all policies. Policies are returned in the alphabetically ascending + // order of their names. + // + // This method is generated by Databricks SDK Code Generator. + List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] + + // List policies. + // + // Lists all policies. Policies are returned in the alphabetically ascending + // order of their names. + // + // This method is generated by Databricks SDK Code Generator. + ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) + + // Update a budget policy. + // + // Updates a policy + Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) +} + +func NewBudgetPolicy(client *client.DatabricksClient) *BudgetPolicyAPI { + return &BudgetPolicyAPI{ + budgetPolicyImpl: budgetPolicyImpl{ + client: client, + }, + } +} + +// A service serves REST API about Budget policies +type BudgetPolicyAPI struct { + budgetPolicyImpl +} + +// Delete a budget policy. +// +// Deletes a policy +func (a *BudgetPolicyAPI) DeleteByPolicyId(ctx context.Context, policyId string) error { + return a.budgetPolicyImpl.Delete(ctx, DeleteBudgetPolicyRequest{ + PolicyId: policyId, + }) +} + +// Get a budget policy. +// +// Retrieves a policy by it's ID. +func (a *BudgetPolicyAPI) GetByPolicyId(ctx context.Context, policyId string) (*BudgetPolicy, error) { + return a.budgetPolicyImpl.Get(ctx, GetBudgetPolicyRequest{ + PolicyId: policyId, + }) +} + +// List policies. +// +// Lists all policies. Policies are returned in the alphabetically ascending +// order of their names. +// +// This method is generated by Databricks SDK Code Generator. 
+func (a *BudgetPolicyAPI) List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] { + + getNextPage := func(ctx context.Context, req ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.budgetPolicyImpl.List(ctx, req) + } + getItems := func(resp *ListBudgetPoliciesResponse) []BudgetPolicy { + return resp.Policies + } + getNextReq := func(resp *ListBudgetPoliciesResponse) *ListBudgetPoliciesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List policies. +// +// Lists all policies. Policies are returned in the alphabetically ascending +// order of their names. +// +// This method is generated by Databricks SDK Code Generator. +func (a *BudgetPolicyAPI) ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[BudgetPolicy](ctx, iterator) +} + type BudgetsInterface interface { // Create new budget. 
diff --git a/service/billing/impl.go b/service/billing/impl.go index 85942be54..330e181ee 100755 --- a/service/billing/impl.go +++ b/service/billing/impl.go @@ -25,6 +25,63 @@ func (a *billableUsageImpl) Download(ctx context.Context, request DownloadReques return &downloadResponse, err } +// unexported type that holds implementations of just BudgetPolicy API methods +type budgetPolicyImpl struct { + client *client.DatabricksClient +} + +func (a *budgetPolicyImpl) Create(ctx context.Context, request CreateBudgetPolicyRequest) (*BudgetPolicy, error) { + var budgetPolicy BudgetPolicy + path := fmt.Sprintf("/api/2.1/accounts/%v/budget-policies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &budgetPolicy) + return &budgetPolicy, err +} + +func (a *budgetPolicyImpl) Delete(ctx context.Context, request DeleteBudgetPolicyRequest) error { + var deleteResponse DeleteResponse + path := fmt.Sprintf("/api/2.1/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) + return err +} + +func (a *budgetPolicyImpl) Get(ctx context.Context, request GetBudgetPolicyRequest) (*BudgetPolicy, error) { + var budgetPolicy BudgetPolicy + path := fmt.Sprintf("/api/2.1/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &budgetPolicy) + return &budgetPolicy, err +} + +func (a *budgetPolicyImpl) List(ctx 
context.Context, request ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { + var listBudgetPoliciesResponse ListBudgetPoliciesResponse + path := fmt.Sprintf("/api/2.1/accounts/%v/budget-policies", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listBudgetPoliciesResponse) + return &listBudgetPoliciesResponse, err +} + +func (a *budgetPolicyImpl) Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) { + var budgetPolicy BudgetPolicy + path := fmt.Sprintf("/api/2.1/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.Policy, &budgetPolicy) + return &budgetPolicy, err +} + // unexported type that holds implementations of just budgets API methods type budgetsImpl struct { client *client.DatabricksClient diff --git a/service/billing/interface.go b/service/billing/interface.go index c67e4d8e8..b5d50594c 100755 --- a/service/billing/interface.go +++ b/service/billing/interface.go @@ -26,6 +26,38 @@ type BillableUsageService interface { Download(ctx context.Context, request DownloadRequest) (*DownloadResponse, error) } +// A service serves REST API about Budget policies +type BudgetPolicyService interface { + + // Create a budget policy. + // + // Creates a new policy. + Create(ctx context.Context, request CreateBudgetPolicyRequest) (*BudgetPolicy, error) + + // Delete a budget policy. + // + // Deletes a policy + Delete(ctx context.Context, request DeleteBudgetPolicyRequest) error + + // Get a budget policy. + // + // Retrieves a policy by it's ID. 
+ Get(ctx context.Context, request GetBudgetPolicyRequest) (*BudgetPolicy, error) + + // List policies. + // + // Lists all policies. Policies are returned in the alphabetically ascending + // order of their names. + // + // Use ListAll() to get all BudgetPolicy instances, which will iterate over every result page. + List(ctx context.Context, request ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) + + // Update a budget policy. + // + // Updates a policy + Update(ctx context.Context, request UpdateBudgetPolicyRequest) (*BudgetPolicy, error) +} + // These APIs manage budget configurations for this account. Budgets enable you // to monitor usage across your account. You can set up budgets to either track // account-wide spending, or apply filters to track the spending of specific diff --git a/service/billing/model.go b/service/billing/model.go index e1aec066d..1829f9c72 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -7,6 +7,7 @@ import ( "io" "github.com/databricks/databricks-sdk-go/marshal" + "github.com/databricks/databricks-sdk-go/service/compute" ) type ActionConfiguration struct { @@ -250,6 +251,29 @@ type BudgetConfigurationFilterWorkspaceIdClause struct { Values []int64 `json:"values,omitempty"` } +// Contains the BudgetPolicy details. +type BudgetPolicy struct { + // A list of tags defined by the customer. At most 20 entries are allowed + // per policy. + CustomTags []compute.CustomPolicyTag `json:"custom_tags,omitempty"` + // The Id of the policy. This field is generated by Databricks and globally + // unique. + PolicyId string `json:"policy_id"` + // The name of the policy. - Must be unique among active policies. - Can + // contain only characters from the ISO 8859-1 (latin1) set. 
+ PolicyName string `json:"policy_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *BudgetPolicy) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BudgetPolicy) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type CreateBillingUsageDashboardRequest struct { // Workspace level usage dashboard shows usage data for the specified // workspace ID. Global level usage dashboard shows usage data for all @@ -364,6 +388,31 @@ type CreateBudgetConfigurationResponse struct { Budget *BudgetConfiguration `json:"budget,omitempty"` } +// A request to create a BudgetPolicy. +type CreateBudgetPolicyRequest struct { + // A list of tags defined by the customer. At most 40 entries are allowed + // per policy. + CustomTags []compute.CustomPolicyTag `json:"custom_tags,omitempty"` + // The name of the policy. - Must be unique among active policies. - Can + // contain only characters of 0-9, a-z, A-Z, -, =, ., :, /, @, _, +, + // whitespace. + PolicyName string `json:"policy_name,omitempty"` + // A unique identifier for this request. Restricted to 36 ASCII characters. + // A random UUID is recommended. This request is only idempotent if a + // `request_id` is provided. + RequestId string `json:"request_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CreateBudgetPolicyRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateBudgetPolicyRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type CreateLogDeliveryConfigurationParams struct { // The optional human-readable name of the log delivery configuration. // Defaults to empty. @@ -455,6 +504,15 @@ type DeleteBudgetConfigurationRequest struct { type DeleteBudgetConfigurationResponse struct { } +// Delete a budget policy +type DeleteBudgetPolicyRequest struct { + // The Id of the policy. 
+ PolicyId string `json:"-" url:"-"` +} + +type DeleteResponse struct { +} + // The status string for log delivery. Possible values are: * `CREATED`: There // were no log delivery attempts since the config was created. * `SUCCEEDED`: // The latest attempt of log delivery has succeeded completely. * @@ -533,6 +591,30 @@ type DownloadResponse struct { Contents io.ReadCloser `json:"-"` } +// Structured representation of a filter to be applied to a list of policies. +// All specified filters will be applied in conjunction. +type Filter struct { + // The policy creator user id to be filtered on. If unspecified, all + // policies will be returned. + CreatorUserId int64 `json:"creator_user_id,omitempty" url:"creator_user_id,omitempty"` + // The policy creator user name to be filtered on. If unspecified, all + // policies will be returned. + CreatorUserName string `json:"creator_user_name,omitempty" url:"creator_user_name,omitempty"` + // The partial name of policies to be filtered on. If unspecified, all + // policies will be returned. + PolicyName string `json:"policy_name,omitempty" url:"policy_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *Filter) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Filter) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get usage dashboard type GetBillingUsageDashboardRequest struct { // Workspace level usage dashboard shows usage data for the specified @@ -581,6 +663,12 @@ type GetBudgetConfigurationResponse struct { Budget *BudgetConfiguration `json:"budget,omitempty"` } +// Get a budget policy +type GetBudgetPolicyRequest struct { + // The Id of the policy. 
+ PolicyId string `json:"-" url:"-"` +} + // Get log delivery configuration type GetLogDeliveryRequest struct { // Databricks log delivery configuration ID @@ -622,6 +710,58 @@ func (s ListBudgetConfigurationsResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// List policies +type ListBudgetPoliciesRequest struct { + // A filter to apply to the list of policies. + FilterBy *Filter `json:"-" url:"filter_by,omitempty"` + // The maximum number of budget policies to return. If unspecified, at most + // 100 budget policies will be returned. The maximum value is 1000; values + // above 1000 will be coerced to 1000. + PageSize int `json:"-" url:"page_size,omitempty"` + // A page token, received from a previous `ListServerlessPolicies` call. + // Provide this to retrieve the subsequent page. If unspecified, the first + // page will be returned. + // + // When paginating, all other parameters provided to + // `ListServerlessPoliciesRequest` must match the call that provided the + // page token. + PageToken string `json:"-" url:"page_token,omitempty"` + // The sort specification. + SortSpec *SortSpec `json:"-" url:"sort_spec,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListBudgetPoliciesRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListBudgetPoliciesRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A list of policies. +type ListBudgetPoliciesResponse struct { + // A token that can be sent as `page_token` to retrieve the next page. If + // this field is omitted, there are no subsequent pages. + NextPageToken string `json:"next_page_token,omitempty"` + + Policies []BudgetPolicy `json:"policies,omitempty"` + // A token that can be sent as `page_token` to retrieve the previous page. + // In this field is omitted, there are no previous pages. 
+ PreviousPageToken string `json:"previous_page_token,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ListBudgetPoliciesResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListBudgetPoliciesResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get all log delivery configurations type ListLogDeliveryRequest struct { // Filter by credential configuration ID. @@ -880,6 +1020,48 @@ func (f *OutputFormat) Type() string { type PatchStatusResponse struct { } +type SortSpec struct { + // Whether to sort in descending order. + Descending bool `json:"descending,omitempty" url:"descending,omitempty"` + // The filed to sort by + Field SortSpecField `json:"field,omitempty" url:"field,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SortSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SortSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SortSpecField string + +const SortSpecFieldPolicyName SortSpecField = `POLICY_NAME` + +// String representation for [fmt.Print] +func (f *SortSpecField) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SortSpecField) Set(v string) error { + switch v { + case `POLICY_NAME`: + *f = SortSpecField(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "POLICY_NAME"`, v) + } +} + +// Type always returns SortSpecField to satisfy [pflag.Value] interface +func (f *SortSpecField) Type() string { + return "SortSpecField" +} + type UpdateBudgetConfigurationBudget struct { // Databricks account ID. AccountId string `json:"account_id,omitempty"` @@ -920,6 +1102,15 @@ type UpdateBudgetConfigurationResponse struct { Budget *BudgetConfiguration `json:"budget,omitempty"` } +// Update a budget policy +type UpdateBudgetPolicyRequest struct { + // Contains the BudgetPolicy details. 
+ Policy *BudgetPolicy `json:"policy,omitempty"` + // The Id of the policy. This field is generated by Databricks and globally + // unique. + PolicyId string `json:"-" url:"-"` +} + type UpdateLogDeliveryConfigurationStatusRequest struct { // Databricks log delivery configuration ID LogDeliveryConfigurationId string `json:"-" url:"-"` diff --git a/service/catalog/model.go b/service/catalog/model.go index 4493e9b3e..010abbdc6 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -378,8 +378,6 @@ type CatalogInfo struct { ProviderName string `json:"provider_name,omitempty"` // Status of an asynchronously provisioned resource. ProvisioningInfo *ProvisioningInfo `json:"provisioning_info,omitempty"` - // Kind of catalog securable. - SecurableKind CatalogInfoSecurableKind `json:"securable_kind,omitempty"` SecurableType string `json:"securable_type,omitempty"` // The name of the share under the share provider. @@ -404,56 +402,6 @@ func (s CatalogInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Kind of catalog securable. 
-type CatalogInfoSecurableKind string - -const CatalogInfoSecurableKindCatalogDeltasharing CatalogInfoSecurableKind = `CATALOG_DELTASHARING` - -const CatalogInfoSecurableKindCatalogForeignBigquery CatalogInfoSecurableKind = `CATALOG_FOREIGN_BIGQUERY` - -const CatalogInfoSecurableKindCatalogForeignDatabricks CatalogInfoSecurableKind = `CATALOG_FOREIGN_DATABRICKS` - -const CatalogInfoSecurableKindCatalogForeignMysql CatalogInfoSecurableKind = `CATALOG_FOREIGN_MYSQL` - -const CatalogInfoSecurableKindCatalogForeignPostgresql CatalogInfoSecurableKind = `CATALOG_FOREIGN_POSTGRESQL` - -const CatalogInfoSecurableKindCatalogForeignRedshift CatalogInfoSecurableKind = `CATALOG_FOREIGN_REDSHIFT` - -const CatalogInfoSecurableKindCatalogForeignSnowflake CatalogInfoSecurableKind = `CATALOG_FOREIGN_SNOWFLAKE` - -const CatalogInfoSecurableKindCatalogForeignSqldw CatalogInfoSecurableKind = `CATALOG_FOREIGN_SQLDW` - -const CatalogInfoSecurableKindCatalogForeignSqlserver CatalogInfoSecurableKind = `CATALOG_FOREIGN_SQLSERVER` - -const CatalogInfoSecurableKindCatalogInternal CatalogInfoSecurableKind = `CATALOG_INTERNAL` - -const CatalogInfoSecurableKindCatalogStandard CatalogInfoSecurableKind = `CATALOG_STANDARD` - -const CatalogInfoSecurableKindCatalogSystem CatalogInfoSecurableKind = `CATALOG_SYSTEM` - -const CatalogInfoSecurableKindCatalogSystemDeltasharing CatalogInfoSecurableKind = `CATALOG_SYSTEM_DELTASHARING` - -// String representation for [fmt.Print] -func (f *CatalogInfoSecurableKind) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *CatalogInfoSecurableKind) Set(v string) error { - switch v { - case `CATALOG_DELTASHARING`, `CATALOG_FOREIGN_BIGQUERY`, `CATALOG_FOREIGN_DATABRICKS`, `CATALOG_FOREIGN_MYSQL`, `CATALOG_FOREIGN_POSTGRESQL`, `CATALOG_FOREIGN_REDSHIFT`, `CATALOG_FOREIGN_SNOWFLAKE`, `CATALOG_FOREIGN_SQLDW`, `CATALOG_FOREIGN_SQLSERVER`, `CATALOG_INTERNAL`, `CATALOG_STANDARD`, `CATALOG_SYSTEM`, 
`CATALOG_SYSTEM_DELTASHARING`: - *f = CatalogInfoSecurableKind(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "CATALOG_DELTASHARING", "CATALOG_FOREIGN_BIGQUERY", "CATALOG_FOREIGN_DATABRICKS", "CATALOG_FOREIGN_MYSQL", "CATALOG_FOREIGN_POSTGRESQL", "CATALOG_FOREIGN_REDSHIFT", "CATALOG_FOREIGN_SNOWFLAKE", "CATALOG_FOREIGN_SQLDW", "CATALOG_FOREIGN_SQLSERVER", "CATALOG_INTERNAL", "CATALOG_STANDARD", "CATALOG_SYSTEM", "CATALOG_SYSTEM_DELTASHARING"`, v) - } -} - -// Type always returns CatalogInfoSecurableKind to satisfy [pflag.Value] interface -func (f *CatalogInfoSecurableKind) Type() string { - return "CatalogInfoSecurableKind" -} - // Whether the current securable is accessible from all workspaces or a specific // set of workspaces. type CatalogIsolationMode string @@ -676,8 +624,6 @@ type ConnectionInfo struct { ProvisioningInfo *ProvisioningInfo `json:"provisioning_info,omitempty"` // If the connection is read only. ReadOnly bool `json:"read_only,omitempty"` - // Kind of connection securable. - SecurableKind ConnectionInfoSecurableKind `json:"securable_kind,omitempty"` SecurableType string `json:"securable_type,omitempty"` // Time at which this connection was updated, in epoch milliseconds. @@ -698,56 +644,6 @@ func (s ConnectionInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Kind of connection securable. 
-type ConnectionInfoSecurableKind string - -const ConnectionInfoSecurableKindConnectionBigquery ConnectionInfoSecurableKind = `CONNECTION_BIGQUERY` - -const ConnectionInfoSecurableKindConnectionBuiltinHiveMetastore ConnectionInfoSecurableKind = `CONNECTION_BUILTIN_HIVE_METASTORE` - -const ConnectionInfoSecurableKindConnectionDatabricks ConnectionInfoSecurableKind = `CONNECTION_DATABRICKS` - -const ConnectionInfoSecurableKindConnectionExternalHiveMetastore ConnectionInfoSecurableKind = `CONNECTION_EXTERNAL_HIVE_METASTORE` - -const ConnectionInfoSecurableKindConnectionGlue ConnectionInfoSecurableKind = `CONNECTION_GLUE` - -const ConnectionInfoSecurableKindConnectionHttpBearer ConnectionInfoSecurableKind = `CONNECTION_HTTP_BEARER` - -const ConnectionInfoSecurableKindConnectionMysql ConnectionInfoSecurableKind = `CONNECTION_MYSQL` - -const ConnectionInfoSecurableKindConnectionOnlineCatalog ConnectionInfoSecurableKind = `CONNECTION_ONLINE_CATALOG` - -const ConnectionInfoSecurableKindConnectionPostgresql ConnectionInfoSecurableKind = `CONNECTION_POSTGRESQL` - -const ConnectionInfoSecurableKindConnectionRedshift ConnectionInfoSecurableKind = `CONNECTION_REDSHIFT` - -const ConnectionInfoSecurableKindConnectionSnowflake ConnectionInfoSecurableKind = `CONNECTION_SNOWFLAKE` - -const ConnectionInfoSecurableKindConnectionSqldw ConnectionInfoSecurableKind = `CONNECTION_SQLDW` - -const ConnectionInfoSecurableKindConnectionSqlserver ConnectionInfoSecurableKind = `CONNECTION_SQLSERVER` - -// String representation for [fmt.Print] -func (f *ConnectionInfoSecurableKind) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *ConnectionInfoSecurableKind) Set(v string) error { - switch v { - case `CONNECTION_BIGQUERY`, `CONNECTION_BUILTIN_HIVE_METASTORE`, `CONNECTION_DATABRICKS`, `CONNECTION_EXTERNAL_HIVE_METASTORE`, `CONNECTION_GLUE`, `CONNECTION_HTTP_BEARER`, `CONNECTION_MYSQL`, `CONNECTION_ONLINE_CATALOG`, 
`CONNECTION_POSTGRESQL`, `CONNECTION_REDSHIFT`, `CONNECTION_SNOWFLAKE`, `CONNECTION_SQLDW`, `CONNECTION_SQLSERVER`: - *f = ConnectionInfoSecurableKind(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "CONNECTION_BIGQUERY", "CONNECTION_BUILTIN_HIVE_METASTORE", "CONNECTION_DATABRICKS", "CONNECTION_EXTERNAL_HIVE_METASTORE", "CONNECTION_GLUE", "CONNECTION_HTTP_BEARER", "CONNECTION_MYSQL", "CONNECTION_ONLINE_CATALOG", "CONNECTION_POSTGRESQL", "CONNECTION_REDSHIFT", "CONNECTION_SNOWFLAKE", "CONNECTION_SQLDW", "CONNECTION_SQLSERVER"`, v) - } -} - -// Type always returns ConnectionInfoSecurableKind to satisfy [pflag.Value] interface -func (f *ConnectionInfoSecurableKind) Type() string { - return "ConnectionInfoSecurableKind" -} - // The type of connection. type ConnectionType string @@ -4928,33 +4824,35 @@ type SecurablePropertiesMap map[string]string // The type of Unity Catalog securable type SecurableType string -const SecurableTypeCatalog SecurableType = `catalog` +const SecurableTypeCatalog SecurableType = `CATALOG` + +const SecurableTypeCleanRoom SecurableType = `CLEAN_ROOM` -const SecurableTypeConnection SecurableType = `connection` +const SecurableTypeConnection SecurableType = `CONNECTION` -const SecurableTypeCredential SecurableType = `credential` +const SecurableTypeCredential SecurableType = `CREDENTIAL` -const SecurableTypeExternalLocation SecurableType = `external_location` +const SecurableTypeExternalLocation SecurableType = `EXTERNAL_LOCATION` -const SecurableTypeFunction SecurableType = `function` +const SecurableTypeFunction SecurableType = `FUNCTION` -const SecurableTypeMetastore SecurableType = `metastore` +const SecurableTypeMetastore SecurableType = `METASTORE` -const SecurableTypePipeline SecurableType = `pipeline` +const SecurableTypePipeline SecurableType = `PIPELINE` -const SecurableTypeProvider SecurableType = `provider` +const SecurableTypeProvider SecurableType = `PROVIDER` -const SecurableTypeRecipient SecurableType = 
`recipient` +const SecurableTypeRecipient SecurableType = `RECIPIENT` -const SecurableTypeSchema SecurableType = `schema` +const SecurableTypeSchema SecurableType = `SCHEMA` -const SecurableTypeShare SecurableType = `share` +const SecurableTypeShare SecurableType = `SHARE` -const SecurableTypeStorageCredential SecurableType = `storage_credential` +const SecurableTypeStorageCredential SecurableType = `STORAGE_CREDENTIAL` -const SecurableTypeTable SecurableType = `table` +const SecurableTypeTable SecurableType = `TABLE` -const SecurableTypeVolume SecurableType = `volume` +const SecurableTypeVolume SecurableType = `VOLUME` // String representation for [fmt.Print] func (f *SecurableType) String() string { @@ -4964,11 +4862,11 @@ func (f *SecurableType) String() string { // Set raw string value and validate it against allowed values func (f *SecurableType) Set(v string) error { switch v { - case `catalog`, `connection`, `credential`, `external_location`, `function`, `metastore`, `pipeline`, `provider`, `recipient`, `schema`, `share`, `storage_credential`, `table`, `volume`: + case `CATALOG`, `CLEAN_ROOM`, `CONNECTION`, `CREDENTIAL`, `EXTERNAL_LOCATION`, `FUNCTION`, `METASTORE`, `PIPELINE`, `PROVIDER`, `RECIPIENT`, `SCHEMA`, `SHARE`, `STORAGE_CREDENTIAL`, `TABLE`, `VOLUME`: *f = SecurableType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "catalog", "connection", "credential", "external_location", "function", "metastore", "pipeline", "provider", "recipient", "schema", "share", "storage_credential", "table", "volume"`, v) + return fmt.Errorf(`value "%s" is not one of "CATALOG", "CLEAN_ROOM", "CONNECTION", "CREDENTIAL", "EXTERNAL_LOCATION", "FUNCTION", "METASTORE", "PIPELINE", "PROVIDER", "RECIPIENT", "SCHEMA", "SHARE", "STORAGE_CREDENTIAL", "TABLE", "VOLUME"`, v) } } @@ -5369,6 +5267,9 @@ type TemporaryCredentials struct { // Server time when the credential will expire, in epoch milliseconds. 
The // API client is advised to cache the credential given this expiration time. ExpirationTime int64 `json:"expiration_time,omitempty"` + // GCP temporary credentials for API authentication. Read more at + // https://developers.google.com/identity/protocols/oauth2/service-account + GcpOauthToken *GcpOauthToken `json:"gcp_oauth_token,omitempty"` ForceSendFields []string `json:"-"` } @@ -5463,6 +5364,8 @@ type UpdateCatalog struct { Name string `json:"-" url:"-"` // New name for the catalog. NewName string `json:"new_name,omitempty"` + // A map of key-value properties attached to the securable. + Options map[string]string `json:"options,omitempty"` // Username of current owner of catalog. Owner string `json:"owner,omitempty"` // A map of key-value properties attached to the securable. diff --git a/service/cleanrooms/api.go b/service/cleanrooms/api.go index 41fe43afd..df4c63937 100755 --- a/service/cleanrooms/api.go +++ b/service/cleanrooms/api.go @@ -230,7 +230,8 @@ type CleanRoomsInterface interface { // Create a new clean room with the specified collaborators. This method is // asynchronous; the returned name field inside the clean_room field can be used // to poll the clean room status, using the :method:cleanrooms/get method. When - // this method returns, the cluster will be in a PROVISIONING state. The cluster + // this method returns, the clean room will be in a PROVISIONING state, with + // only name, owner, comment, created_at and status populated. The clean room // will be usable once it enters an ACTIVE state. // // The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** diff --git a/service/cleanrooms/interface.go b/service/cleanrooms/interface.go index 6165f4e97..98262d34c 100755 --- a/service/cleanrooms/interface.go +++ b/service/cleanrooms/interface.go @@ -63,8 +63,9 @@ type CleanRoomsService interface { // Create a new clean room with the specified collaborators. 
This method is // asynchronous; the returned name field inside the clean_room field can be // used to poll the clean room status, using the :method:cleanrooms/get - // method. When this method returns, the cluster will be in a PROVISIONING - // state. The cluster will be usable once it enters an ACTIVE state. + // method. When this method returns, the clean room will be in a + // PROVISIONING state, with only name, owner, comment, created_at and status + // populated. The clean room will be usable once it enters an ACTIVE state. // // The caller must be a metastore admin or have the **CREATE_CLEAN_ROOM** // privilege on the metastore. diff --git a/service/compute/model.go b/service/compute/model.go index 5fe81ced7..fbf873ca2 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -1935,6 +1935,33 @@ func (s Created) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type CustomPolicyTag struct { + // The key of the tag. - Must be unique among all custom tags of the same + // policy - Cannot be “budget-policy-name”, “budget-policy-id” or + // "budget-policy-resolution-result" - these tags are preserved. + // + // - Follows the regex pattern defined in + // cluster-common/conf/src/ClusterTagConstraints.scala + // (https://src.dev.databricks.com/databricks/universe@1647196627c8dc7b4152ad098a94b86484b93a6c/-/blob/cluster-common/conf/src/ClusterTagConstraints.scala?L17) + Key string `json:"key"` + // The value of the tag. 
+ // + // - Follows the regex pattern defined in + // cluster-common/conf/src/ClusterTagConstraints.scala + // (https://src.dev.databricks.com/databricks/universe@1647196627c8dc7b4152ad098a94b86484b93a6c/-/blob/cluster-common/conf/src/ClusterTagConstraints.scala?L24) + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *CustomPolicyTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CustomPolicyTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type DataPlaneEventDetails struct { // EventType DataPlaneEventDetailsEventType `json:"event_type,omitempty"` diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 1ec369728..11f706c95 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Genie, Lakeview, etc. +// These APIs allow you to manage Genie, Lakeview, Lakeview Embedded, Query Execution, etc. package dashboards import ( @@ -609,3 +609,63 @@ func (a *LakeviewAPI) UnpublishByDashboardId(ctx context.Context, dashboardId st DashboardId: dashboardId, }) } + +type LakeviewEmbeddedInterface interface { + + // Read a published dashboard in an embedded ui. + // + // Get the current published dashboard within an embedded context. + GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error + + // Read a published dashboard in an embedded ui. + // + // Get the current published dashboard within an embedded context. + GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error +} + +func NewLakeviewEmbedded(client *client.DatabricksClient) *LakeviewEmbeddedAPI { + return &LakeviewEmbeddedAPI{ + lakeviewEmbeddedImpl: lakeviewEmbeddedImpl{ + client: client, + }, + } +} + +// Token-based Lakeview APIs for embedding dashboards in external applications. 
+type LakeviewEmbeddedAPI struct { + lakeviewEmbeddedImpl +} + +// Read a published dashboard in an embedded ui. +// +// Get the current published dashboard within an embedded context. +func (a *LakeviewEmbeddedAPI) GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error { + return a.lakeviewEmbeddedImpl.GetPublishedDashboardEmbedded(ctx, GetPublishedDashboardEmbeddedRequest{ + DashboardId: dashboardId, + }) +} + +type QueryExecutionInterface interface { + + // Cancel the results for the a query for a published, embedded dashboard. + CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) + + // Execute a query for a published dashboard. + ExecutePublishedDashboardQuery(ctx context.Context, request ExecutePublishedDashboardQueryRequest) error + + // Poll the results for the a query for a published, embedded dashboard. + PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) +} + +func NewQueryExecution(client *client.DatabricksClient) *QueryExecutionAPI { + return &QueryExecutionAPI{ + queryExecutionImpl: queryExecutionImpl{ + client: client, + }, + } +} + +// Query execution APIs for AI / BI Dashboards +type QueryExecutionAPI struct { + queryExecutionImpl +} diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index c23deddd6..cd55850bc 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -258,3 +258,54 @@ func (a *lakeviewImpl) UpdateSchedule(ctx context.Context, request UpdateSchedul err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request.Schedule, &schedule) return &schedule, err } + +// unexported type that holds implementations of just LakeviewEmbedded API methods +type lakeviewEmbeddedImpl struct { + client *client.DatabricksClient +} + +func (a *lakeviewEmbeddedImpl) GetPublishedDashboardEmbedded(ctx context.Context, 
request GetPublishedDashboardEmbeddedRequest) error { + var getPublishedDashboardEmbeddedResponse GetPublishedDashboardEmbeddedResponse + path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/published/embedded", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedDashboardEmbeddedResponse) + return err +} + +// unexported type that holds implementations of just QueryExecution API methods +type queryExecutionImpl struct { + client *client.DatabricksClient +} + +func (a *queryExecutionImpl) CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) { + var cancelQueryExecutionResponse CancelQueryExecutionResponse + path := "/api/2.0/lakeview-query/query/published" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &cancelQueryExecutionResponse) + return &cancelQueryExecutionResponse, err +} + +func (a *queryExecutionImpl) ExecutePublishedDashboardQuery(ctx context.Context, request ExecutePublishedDashboardQueryRequest) error { + var executeQueryResponse ExecuteQueryResponse + path := "/api/2.0/lakeview-query/query/published" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &executeQueryResponse) + return err +} + +func (a *queryExecutionImpl) PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) { + var pollQueryStatusResponse PollQueryStatusResponse + path := "/api/2.0/lakeview-query/query/published" + queryParams := 
make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &pollQueryStatusResponse) + return &pollQueryStatusResponse, err +} diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index 76c728fc8..aeff5a93d 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -124,3 +124,25 @@ type LakeviewService interface { // Update dashboard schedule. UpdateSchedule(ctx context.Context, request UpdateScheduleRequest) (*Schedule, error) } + +// Token-based Lakeview APIs for embedding dashboards in external applications. +type LakeviewEmbeddedService interface { + + // Read a published dashboard in an embedded ui. + // + // Get the current published dashboard within an embedded context. + GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error +} + +// Query execution APIs for AI / BI Dashboards +type QueryExecutionService interface { + + // Cancel the results for the a query for a published, embedded dashboard. + CancelPublishedQueryExecution(ctx context.Context, request CancelPublishedQueryExecutionRequest) (*CancelQueryExecutionResponse, error) + + // Execute a query for a published dashboard. + ExecutePublishedDashboardQuery(ctx context.Context, request ExecutePublishedDashboardQueryRequest) error + + // Poll the results for the a query for a published, embedded dashboard. 
+ PollPublishedQueryStatus(ctx context.Context, request PollPublishedQueryStatusRequest) (*PollQueryStatusResponse, error) +} diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 495e6b8db..7c7fd809e 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -9,6 +9,32 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" ) +// Cancel the results for the a query for a published, embedded dashboard +type CancelPublishedQueryExecutionRequest struct { + DashboardName string `json:"-" url:"dashboard_name"` + + DashboardRevisionId string `json:"-" url:"dashboard_revision_id"` + // Example: + // EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + Tokens []string `json:"-" url:"tokens,omitempty"` +} + +type CancelQueryExecutionResponse struct { + Status []CancelQueryExecutionResponseStatus `json:"status,omitempty"` +} + +type CancelQueryExecutionResponseStatus struct { + // The token to poll for result asynchronously Example: + // EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + DataToken string `json:"data_token"` + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. + Pending *Empty `json:"pending,omitempty"` + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. + Success *Empty `json:"success,omitempty"` +} + // Create dashboard type CreateDashboardRequest struct { Dashboard *Dashboard `json:"dashboard,omitempty"` @@ -223,6 +249,40 @@ func (s DeleteSubscriptionRequest) MarshalJSON() ([]byte, error) { type DeleteSubscriptionResponse struct { } +// Represents an empty message, similar to google.protobuf.Empty, which is not +// available in the firm right now. +type Empty struct { +} + +// Execute query request for published Dashboards. 
Since published dashboards +// have the option of running as the publisher, the datasets, warehouse_id are +// excluded from the request and instead read from the source (lakeview-config) +// via the additional parameters (dashboardName and dashboardRevisionId) +type ExecutePublishedDashboardQueryRequest struct { + // Dashboard name and revision_id is required to retrieve + // PublishedDatasetDataModel which contains the list of datasets, + // warehouse_id, and embedded_credentials + DashboardName string `json:"dashboard_name"` + + DashboardRevisionId string `json:"dashboard_revision_id"` + // A dashboard schedule can override the warehouse used as compute for + // processing the published dashboard queries + OverrideWarehouseId string `json:"override_warehouse_id,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ExecutePublishedDashboardQueryRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExecutePublishedDashboardQueryRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExecuteQueryResponse struct { +} + // Genie AI Response type GenieAttachment struct { Query *QueryAttachment `json:"query,omitempty"` @@ -378,6 +438,15 @@ type GetDashboardRequest struct { DashboardId string `json:"-" url:"-"` } +// Read a published dashboard in an embedded ui. +type GetPublishedDashboardEmbeddedRequest struct { + // UUID identifying the published dashboard. + DashboardId string `json:"-" url:"-"` +} + +type GetPublishedDashboardEmbeddedResponse struct { +} + // Get published dashboard type GetPublishedDashboardRequest struct { // UUID identifying the published dashboard. 
@@ -771,6 +840,30 @@ func (s MigrateDashboardRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type PendingStatus struct { + // The token to poll for result asynchronously Example: + // EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + DataToken string `json:"data_token"` +} + +// Poll the results for the a query for a published, embedded dashboard +type PollPublishedQueryStatusRequest struct { + DashboardName string `json:"-" url:"dashboard_name"` + + DashboardRevisionId string `json:"-" url:"dashboard_revision_id"` + // Example: + // EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + Tokens []string `json:"-" url:"tokens,omitempty"` +} + +type PollQueryStatusResponse struct { + Data []PollQueryStatusResponseData `json:"data,omitempty"` +} + +type PollQueryStatusResponseData struct { + Status QueryResponseStatus `json:"status"` +} + type PublishRequest struct { // UUID identifying the dashboard to be published. DashboardId string `json:"-" url:"-"` @@ -830,6 +923,8 @@ type QueryAttachment struct { LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` // AI generated SQL query Query string `json:"query,omitempty"` + + StatementId string `json:"statement_id,omitempty"` // Name of the query Title string `json:"title,omitempty"` @@ -844,6 +939,34 @@ func (s QueryAttachment) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type QueryResponseStatus struct { + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. + Canceled *Empty `json:"canceled,omitempty"` + // Represents an empty message, similar to google.protobuf.Empty, which is + // not available in the firm right now. 
+ Closed *Empty `json:"closed,omitempty"` + + Pending *PendingStatus `json:"pending,omitempty"` + // The statement id in format(01eef5da-c56e-1f36-bafa-21906587d6ba) The + // statement_id should be identical to data_token in SuccessStatus and + // PendingStatus. This field is created for audit logging purpose to record + // the statement_id of all QueryResponseStatus. + StatementId string `json:"statement_id,omitempty"` + + Success *SuccessStatus `json:"success,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *QueryResponseStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s QueryResponseStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type QuerySchema struct { Columns []QuerySchemaColumn `json:"columns,omitempty"` // Used to determine if the stored query schema is compatible with the @@ -1007,6 +1130,24 @@ type SubscriptionSubscriberUser struct { UserId int64 `json:"user_id"` } +type SuccessStatus struct { + // The token to poll for result asynchronously Example: + // EC0A..ChAB7WCEn_4Qo4vkLqEbXsxxEgh3Y2pbWw45WhoQXgZSQo9aS5q2ZvFcbvbx9CgA-PAEAQ + DataToken string `json:"data_token"` + // Whether the query result is truncated (either by byte limit or row limit) + Truncated bool `json:"truncated,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *SuccessStatus) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SuccessStatus) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type TextAttachment struct { // AI generated message Content string `json:"content,omitempty"` diff --git a/service/jobs/model.go b/service/jobs/model.go index c94c00d51..f2303f89b 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -72,6 +72,12 @@ type BaseRun struct { CreatorUserName string `json:"creator_user_name,omitempty"` // Description of the run Description string `json:"description,omitempty"` + // effective_performance_target is the actual 
performance target used by the + // run during execution. effective_performance_target can differ from + // performance_target depending on if the job was eligible to be + // cost-optimized (e.g. contains at least 1 serverless task) or if we + // specifically override the value for the run (ex. RunNow). + EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime int64 `json:"end_time,omitempty"` @@ -596,6 +602,9 @@ type CreateJob struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` + // PerformanceTarget defines how performant or cost efficient the execution + // of run on serverless should be. + PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // The queue settings of the job. Queue *QueueSettings `json:"queue,omitempty"` // Write-only setting. Specifies the user or service principal that the job @@ -1661,6 +1670,9 @@ type JobSettings struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` + // PerformanceTarget defines how performant or cost efficient the execution + // of run on serverless should be. + PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // The queue settings of the job. Queue *QueueSettings `json:"queue,omitempty"` // Write-only setting. Specifies the user or service principal that the job @@ -2171,6 +2183,37 @@ func (f *PauseStatus) Type() string { return "PauseStatus" } +// PerformanceTarget defines how performant (lower latency) or cost efficient +// the execution of run on serverless compute should be. 
The performance mode on +// the job or pipeline should map to a performance setting that is passed to +// Cluster Manager (see cluster-common PerformanceTarget). +type PerformanceTarget string + +const PerformanceTargetCostOptimized PerformanceTarget = `COST_OPTIMIZED` + +const PerformanceTargetPerformanceOptimized PerformanceTarget = `PERFORMANCE_OPTIMIZED` + +// String representation for [fmt.Print] +func (f *PerformanceTarget) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *PerformanceTarget) Set(v string) error { + switch v { + case `COST_OPTIMIZED`, `PERFORMANCE_OPTIMIZED`: + *f = PerformanceTarget(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COST_OPTIMIZED", "PERFORMANCE_OPTIMIZED"`, v) + } +} + +// Type always returns PerformanceTarget to satisfy [pflag.Value] interface +func (f *PerformanceTarget) Type() string { + return "PerformanceTarget" +} + type PeriodicTriggerConfiguration struct { // The interval at which the trigger should run. Interval int `json:"interval"` @@ -2618,6 +2661,12 @@ type Run struct { CreatorUserName string `json:"creator_user_name,omitempty"` // Description of the run Description string `json:"description,omitempty"` + // effective_performance_target is the actual performance target used by the + // run during execution. effective_performance_target can differ from + // performance_target depending on if the job was eligible to be + // cost-optimized (e.g. contains at least 1 serverless task) or if we + // specifically override the value for the run (ex. RunNow). + EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. EndTime int64 `json:"end_time,omitempty"` @@ -3129,6 +3178,10 @@ type RunNow struct { // A list of task keys to run inside of the job. 
If this field is not // provided, all tasks in the job will be run. Only []string `json:"only,omitempty"` + // PerformanceTarget defines how performant or cost efficient the execution + // of run on serverless compute should be. For RunNow request, the run will + // execute with this settings instead of ones defined in job. + PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` @@ -3490,6 +3543,15 @@ type RunTask struct { DependsOn []TaskDependency `json:"depends_on,omitempty"` // An optional description for this task. Description string `json:"description,omitempty"` + // Denotes whether or not the task was disabled by the user. Disabled tasks + // do not execute and are immediately skipped as soon as they are unblocked. + Disabled bool `json:"disabled,omitempty"` + // effective_performance_target is the actual performance target used by the + // run during execution. effective_performance_target can differ from + // performance_target depending on if the job was eligible to be + // cost-optimized (e.g. contains at least 1 serverless task) or if an + // override was provided for the run (ex. RunNow). + EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. 
EmailNotifications *JobEmailNotifications `json:"email_notifications,omitempty"` @@ -4581,6 +4643,8 @@ func (s TaskNotificationSettings) MarshalJSON() ([]byte, error) { // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now type TerminationCodeCode string +const TerminationCodeCodeBudgetPolicyLimitExceeded TerminationCodeCode = `BUDGET_POLICY_LIMIT_EXCEEDED` + // The run was canceled during execution by the platform; for // example, if the maximum run duration was exceeded. const TerminationCodeCodeCanceled TerminationCodeCode = `CANCELED` @@ -4678,11 +4742,11 @@ func (f *TerminationCodeCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationCodeCode) Set(v string) error { switch v { - case `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `USER_CANCELED`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: + case `BUDGET_POLICY_LIMIT_EXCEEDED`, `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `USER_CANCELED`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: *f = TerminationCodeCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", 
"CLUSTER_REQUEST_LIMIT_EXCEEDED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", "INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "USER_CANCELED", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) + return fmt.Errorf(`value "%s" is not one of "BUDGET_POLICY_LIMIT_EXCEEDED", "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", "CLUSTER_REQUEST_LIMIT_EXCEEDED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", "INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "USER_CANCELED", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) } } diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 80e7d7255..1fa578111 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -38,6 +38,10 @@ type CreateCustomAppIntegration struct { Scopes []string `json:"scopes,omitempty"` // Token access policy TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + // Scopes that will need to be consented by end user to mint the access + // token. If the user does not authorize the access token will not be + // minted. Must be a subset of scopes. 
+ UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` ForceSendFields []string `json:"-"` } @@ -256,6 +260,10 @@ type GetCustomAppIntegrationOutput struct { Scopes []string `json:"scopes,omitempty"` // Token access policy TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + // Scopes that will need to be consented by end user to mint the access + // token. If the user does not authorize the access token will not be + // minted. Must be a subset of scopes. + UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` ForceSendFields []string `json:"-"` } @@ -655,6 +663,10 @@ type UpdateCustomAppIntegration struct { Scopes []string `json:"scopes,omitempty"` // Token access policy to be updated in the custom OAuth app integration TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` + // Scopes that will need to be consented by end user to mint the access + // token. If the user does not authorize the access token will not be + // minted. Must be a subset of scopes. + UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` } type UpdateCustomAppIntegrationOutput struct { diff --git a/service/pkg.go b/service/pkg.go index f26f56eda..0bd4cac58 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -22,6 +22,8 @@ // // - [billing.BillableUsageAPI]: This API allows you to download billable usage logs for the specified account and date range. // +// - [billing.BudgetPolicyAPI]: A service serves REST API about Budget policies. +// // - [catalog.CatalogsAPI]: A catalog is the first layer of Unity Catalog’s three-level namespace. // // - [cleanrooms.CleanRoomAssetsAPI]: Clean room assets are data and code objects — Tables, volumes, and notebooks that are shared with the clean room. @@ -50,10 +52,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. 
// -// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. -// // - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. // +// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. // // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. @@ -80,6 +82,8 @@ // // - [settings.DisableLegacyFeaturesAPI]: Disable legacy features for new Databricks workspaces. // +// - [settings.EnableIpAccessListsAPI]: Controls the enforcement of IP access lists for accessing the account console. +// // - [provisioning.EncryptionKeysAPI]: These APIs manage encryption key configurations for this workspace (optional). // // - [settings.EnhancedSecurityMonitoringAPI]: Controls whether enhanced security monitoring is enabled for the current workspace. @@ -120,6 +124,8 @@ // // - [dashboards.LakeviewAPI]: These APIs provide specific management operations for Lakeview dashboards. // +// - [dashboards.LakeviewEmbeddedAPI]: Token-based Lakeview APIs for embedding dashboards in external applications. +// // - [compute.LibrariesAPI]: The Libraries API allows you to install and uninstall libraries and get the status of libraries on a cluster. // // - [billing.LogDeliveryAPI]: These APIs manage log delivery configurations for this account. @@ -184,6 +190,8 @@ // // - [sql.QueriesLegacyAPI]: These endpoints are used for CRUD operations on query definitions. // +// - [dashboards.QueryExecutionAPI]: Query execution APIs for AI / BI Dashboards. 
+// // - [sql.QueryHistoryAPI]: A service responsible for storing and retrieving the list of queries run against SQL endpoints and serverless compute. // // - [sql.QueryVisualizationsAPI]: This is an evolving API that facilitates the addition and removal of visualizations from existing queries in the Databricks Workspace. @@ -194,6 +202,8 @@ // // - [sharing.RecipientsAPI]: A recipient is an object you create using :method:recipients/create to represent an organization which you want to allow access shares. // +// - [sql.RedashConfigAPI]: Redash V2 service for workspace configurations (internal). +// // - [catalog.RegisteredModelsAPI]: Databricks provides a hosted version of MLflow Model Registry in Unity Catalog. // // - [workspace.ReposAPI]: The Repos API allows users to manage their git repos. @@ -312,6 +322,7 @@ var ( _ *catalog.ArtifactAllowlistsAPI = nil _ *settings.AutomaticClusterUpdateAPI = nil _ *billing.BillableUsageAPI = nil + _ *billing.BudgetPolicyAPI = nil _ *catalog.CatalogsAPI = nil _ *cleanrooms.CleanRoomAssetsAPI = nil _ *cleanrooms.CleanRoomTaskRunsAPI = nil @@ -326,8 +337,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *catalog.CredentialsAPI = nil _ *provisioning.CredentialsAPI = nil + _ *catalog.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil @@ -341,6 +352,7 @@ var ( _ *settings.DisableLegacyAccessAPI = nil _ *settings.DisableLegacyDbfsAPI = nil _ *settings.DisableLegacyFeaturesAPI = nil + _ *settings.EnableIpAccessListsAPI = nil _ *provisioning.EncryptionKeysAPI = nil _ *settings.EnhancedSecurityMonitoringAPI = nil _ *settings.EsmEnablementAccountAPI = nil @@ -361,6 +373,7 @@ var ( _ *settings.AccountIpAccessListsAPI = nil _ *jobs.JobsAPI = nil _ *dashboards.LakeviewAPI = nil + _ *dashboards.LakeviewEmbeddedAPI = nil _ *compute.LibrariesAPI = nil _ 
*billing.LogDeliveryAPI = nil _ *catalog.AccountMetastoreAssignmentsAPI = nil @@ -393,11 +406,13 @@ var ( _ *catalog.QualityMonitorsAPI = nil _ *sql.QueriesAPI = nil _ *sql.QueriesLegacyAPI = nil + _ *dashboards.QueryExecutionAPI = nil _ *sql.QueryHistoryAPI = nil _ *sql.QueryVisualizationsAPI = nil _ *sql.QueryVisualizationsLegacyAPI = nil _ *sharing.RecipientActivationAPI = nil _ *sharing.RecipientsAPI = nil + _ *sql.RedashConfigAPI = nil _ *catalog.RegisteredModelsAPI = nil _ *workspace.ReposAPI = nil _ *catalog.ResourceQuotasAPI = nil diff --git a/service/serving/api.go b/service/serving/api.go index 3e7182827..72274627a 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -106,7 +106,7 @@ type ServingEndpointsInterface interface { GetPermissionsByServingEndpointId(ctx context.Context, servingEndpointId string) (*ServingEndpointPermissions, error) // Make external services call using the credentials stored in UC Connection. - HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*ExternalFunctionResponse, error) + HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*HttpRequestResponse, error) // Get all serving endpoints. 
// diff --git a/service/serving/impl.go b/service/serving/impl.go index 220d1f3b4..9eda39ef7 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -99,15 +99,15 @@ func (a *servingEndpointsImpl) GetPermissions(ctx context.Context, request GetSe return &servingEndpointPermissions, err } -func (a *servingEndpointsImpl) HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*ExternalFunctionResponse, error) { - var externalFunctionResponse ExternalFunctionResponse +func (a *servingEndpointsImpl) HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*HttpRequestResponse, error) { + var httpRequestResponse HttpRequestResponse path := "/api/2.0/external-function" queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" + headers["Accept"] = "text/plain" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &externalFunctionResponse) - return &externalFunctionResponse, err + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &httpRequestResponse) + return &httpRequestResponse, err } func (a *servingEndpointsImpl) List(ctx context.Context) (*ListEndpointsResponse, error) { diff --git a/service/serving/interface.go b/service/serving/interface.go index 806e0f49f..ae4452c3f 100755 --- a/service/serving/interface.go +++ b/service/serving/interface.go @@ -63,7 +63,7 @@ type ServingEndpointsService interface { // Make external services call using the credentials stored in UC // Connection. - HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*ExternalFunctionResponse, error) + HttpRequest(ctx context.Context, request ExternalFunctionRequest) (*HttpRequestResponse, error) // Get all serving endpoints. 
// diff --git a/service/serving/model.go b/service/serving/model.go index 6d22239da..1fc96710a 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -845,23 +845,6 @@ func (f *ExternalFunctionRequestHttpMethod) Type() string { return "ExternalFunctionRequestHttpMethod" } -type ExternalFunctionResponse struct { - // The HTTP status code of the response - StatusCode int `json:"status_code,omitempty"` - // The content of the response - Text string `json:"text,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *ExternalFunctionResponse) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s ExternalFunctionResponse) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - type ExternalModel struct { // AI21Labs Config. Only required if the provider is 'ai21labs'. Ai21labsConfig *Ai21LabsConfig `json:"ai21labs_config,omitempty"` @@ -1049,6 +1032,10 @@ func (s GoogleCloudVertexAiConfig) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type HttpRequestResponse struct { + Contents io.ReadCloser `json:"-"` +} + type ListEndpointsResponse struct { // The list of endpoints. Endpoints []ServingEndpoint `json:"endpoints,omitempty"` diff --git a/service/settings/api.go b/service/settings/api.go index 520214899..a63c584b9 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
-// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. +// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enable Ip Access Lists, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. package settings import ( @@ -277,6 +277,11 @@ type AccountSettingsInterface interface { // prior to 13.3LTS. DisableLegacyFeatures() DisableLegacyFeaturesInterface + // Controls the enforcement of IP access lists for accessing the account + // console. Allowing you to enable or disable restricted access based on IP + // addresses. + EnableIpAccessLists() EnableIpAccessListsInterface + // The enhanced security monitoring setting at the account level controls // whether to enable the feature on new workspaces. By default, this // account-level setting is disabled for new workspaces. 
After workspace @@ -307,6 +312,8 @@ func NewAccountSettings(client *client.DatabricksClient) *AccountSettingsAPI { disableLegacyFeatures: NewDisableLegacyFeatures(client), + enableIpAccessLists: NewEnableIpAccessLists(client), + esmEnablementAccount: NewEsmEnablementAccount(client), personalCompute: NewPersonalCompute(client), @@ -335,6 +342,11 @@ type AccountSettingsAPI struct { // prior to 13.3LTS. disableLegacyFeatures DisableLegacyFeaturesInterface + // Controls the enforcement of IP access lists for accessing the account + // console. Allowing you to enable or disable restricted access based on IP + // addresses. + enableIpAccessLists EnableIpAccessListsInterface + // The enhanced security monitoring setting at the account level controls // whether to enable the feature on new workspaces. By default, this // account-level setting is disabled for new workspaces. After workspace @@ -363,6 +375,10 @@ func (a *AccountSettingsAPI) DisableLegacyFeatures() DisableLegacyFeaturesInterf return a.disableLegacyFeatures } +func (a *AccountSettingsAPI) EnableIpAccessLists() EnableIpAccessListsInterface { + return a.enableIpAccessLists +} + func (a *AccountSettingsAPI) EsmEnablementAccount() EsmEnablementAccountInterface { return a.esmEnablementAccount } @@ -723,6 +739,39 @@ type DisableLegacyFeaturesAPI struct { disableLegacyFeaturesImpl } +type EnableIpAccessListsInterface interface { + + // Delete the account IP access toggle setting. + // + // Reverts the value of the account IP access toggle setting to default (ON) + Delete(ctx context.Context, request DeleteAccountIpAccessEnableRequest) (*DeleteAccountIpAccessEnableResponse, error) + + // Get the account IP access toggle setting. + // + // Gets the value of the account IP access toggle setting. + Get(ctx context.Context, request GetAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) + + // Update the account IP access toggle setting. + // + // Updates the value of the account IP access toggle setting. 
+ Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) +} + +func NewEnableIpAccessLists(client *client.DatabricksClient) *EnableIpAccessListsAPI { + return &EnableIpAccessListsAPI{ + enableIpAccessListsImpl: enableIpAccessListsImpl{ + client: client, + }, + } +} + +// Controls the enforcement of IP access lists for accessing the account +// console. Allowing you to enable or disable restricted access based on IP +// addresses. +type EnableIpAccessListsAPI struct { + enableIpAccessListsImpl +} + type EnhancedSecurityMonitoringInterface interface { // Get the enhanced security monitoring setting. diff --git a/service/settings/impl.go b/service/settings/impl.go index ea0048fc6..77c0312ba 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -393,6 +393,42 @@ func (a *disableLegacyFeaturesImpl) Update(ctx context.Context, request UpdateDi return &disableLegacyFeatures, err } +// unexported type that holds implementations of just EnableIpAccessLists API methods +type enableIpAccessListsImpl struct { + client *client.DatabricksClient +} + +func (a *enableIpAccessListsImpl) Delete(ctx context.Context, request DeleteAccountIpAccessEnableRequest) (*DeleteAccountIpAccessEnableResponse, error) { + var deleteAccountIpAccessEnableResponse DeleteAccountIpAccessEnableResponse + path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteAccountIpAccessEnableResponse) + return &deleteAccountIpAccessEnableResponse, err +} + +func (a *enableIpAccessListsImpl) Get(ctx context.Context, request GetAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { + var accountIpAccessEnable AccountIpAccessEnable + path := 
fmt.Sprintf("/api/2.0/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &accountIpAccessEnable) + return &accountIpAccessEnable, err +} + +func (a *enableIpAccessListsImpl) Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) { + var accountIpAccessEnable AccountIpAccessEnable + path := fmt.Sprintf("/api/2.0/accounts/%v/settings/types/acct_ip_acl_enable/names/default", a.client.ConfiguredAccountID()) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &accountIpAccessEnable) + return &accountIpAccessEnable, err +} + // unexported type that holds implementations of just EnhancedSecurityMonitoring API methods type enhancedSecurityMonitoringImpl struct { client *client.DatabricksClient diff --git a/service/settings/interface.go b/service/settings/interface.go index d78c38625..cb38b4641 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -344,6 +344,27 @@ type DisableLegacyFeaturesService interface { Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) } +// Controls the enforcement of IP access lists for accessing the account +// console. Allowing you to enable or disable restricted access based on IP +// addresses. +type EnableIpAccessListsService interface { + + // Delete the account IP access toggle setting. 
+ // + // Reverts the value of the account IP access toggle setting to default (ON) + Delete(ctx context.Context, request DeleteAccountIpAccessEnableRequest) (*DeleteAccountIpAccessEnableResponse, error) + + // Get the account IP access toggle setting. + // + // Gets the value of the account IP access toggle setting. + Get(ctx context.Context, request GetAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) + + // Update the account IP access toggle setting. + // + // Updates the value of the account IP access toggle setting. + Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) +} + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. However, if the compliance diff --git a/service/settings/model.go b/service/settings/model.go index 66cc47739..c2dded259 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -8,6 +8,34 @@ import ( "github.com/databricks/databricks-sdk-go/marshal" ) +type AccountIpAccessEnable struct { + AcctIpAclEnable BooleanMessage `json:"acct_ip_acl_enable"` + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // update pattern to perform setting updates in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // PATCH request to identify the setting version you are updating. + Etag string `json:"etag,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. 
The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *AccountIpAccessEnable) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AccountIpAccessEnable) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type AibiDashboardEmbeddingAccessPolicy struct { AccessPolicyType AibiDashboardEmbeddingAccessPolicyAccessPolicyType `json:"access_policy_type"` } @@ -360,8 +388,12 @@ const ComplianceStandardFedrampModerate ComplianceStandard = `FEDRAMP_MODERATE` const ComplianceStandardHipaa ComplianceStandard = `HIPAA` +const ComplianceStandardHitrust ComplianceStandard = `HITRUST` + const ComplianceStandardIrapProtected ComplianceStandard = `IRAP_PROTECTED` +const ComplianceStandardIsmap ComplianceStandard = `ISMAP` + const ComplianceStandardItarEar ComplianceStandard = `ITAR_EAR` const ComplianceStandardNone ComplianceStandard = `NONE` @@ -376,11 +408,11 @@ func (f *ComplianceStandard) String() string { // Set raw string value and validate it against allowed values func (f *ComplianceStandard) Set(v string) error { switch v { - case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `IRAP_PROTECTED`, `ITAR_EAR`, `NONE`, `PCI_DSS`: + case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `HITRUST`, `IRAP_PROTECTED`, `ISMAP`, `ITAR_EAR`, `NONE`, `PCI_DSS`: *f = ComplianceStandard(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "IRAP_PROTECTED", "ITAR_EAR", "NONE", "PCI_DSS"`, v) + return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", 
"FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "HITRUST", "IRAP_PROTECTED", "ISMAP", "ITAR_EAR", "NONE", "PCI_DSS"`, v) } } @@ -655,6 +687,40 @@ func (s DefaultNamespaceSetting) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Delete the account IP access toggle setting +type DeleteAccountIpAccessEnableRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *DeleteAccountIpAccessEnableRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeleteAccountIpAccessEnableRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The etag is returned. +type DeleteAccountIpAccessEnableResponse struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. 
+ Etag string `json:"etag"` +} + // Delete access list type DeleteAccountIpAccessListRequest struct { // The ID for the corresponding IP access list @@ -1515,6 +1581,28 @@ func (s GenericWebhookConfig) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Get the account IP access toggle setting +type GetAccountIpAccessEnableRequest struct { + // etag used for versioning. The response is at least as fresh as the eTag + // provided. This is used for optimistic concurrency control as a way to + // help prevent simultaneous writes of a setting overwriting each other. It + // is strongly suggested that systems make use of the etag in the read -> + // delete pattern to perform setting deletions in order to avoid race + // conditions. That is, get an etag from a GET request, and pass it with the + // DELETE request to identify the rule set version you are deleting. + Etag string `json:"-" url:"etag,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *GetAccountIpAccessEnableRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetAccountIpAccessEnableRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get IP access list type GetAccountIpAccessListRequest struct { // The ID for the corresponding IP access list @@ -2775,6 +2863,8 @@ type TokenType string const TokenTypeArclightAzureExchangeToken TokenType = `ARCLIGHT_AZURE_EXCHANGE_TOKEN` +const TokenTypeArclightAzureExchangeTokenWithUserDelegationKey TokenType = `ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY` + const TokenTypeAzureActiveDirectoryToken TokenType = `AZURE_ACTIVE_DIRECTORY_TOKEN` // String representation for [fmt.Print] @@ -2785,11 +2875,11 @@ func (f *TokenType) String() string { // Set raw string value and validate it against allowed values func (f *TokenType) Set(v string) error { switch v { - case `ARCLIGHT_AZURE_EXCHANGE_TOKEN`, `AZURE_ACTIVE_DIRECTORY_TOKEN`: + case `ARCLIGHT_AZURE_EXCHANGE_TOKEN`, 
`ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY`, `AZURE_ACTIVE_DIRECTORY_TOKEN`: *f = TokenType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ARCLIGHT_AZURE_EXCHANGE_TOKEN", "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) + return fmt.Errorf(`value "%s" is not one of "ARCLIGHT_AZURE_EXCHANGE_TOKEN", "ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY", "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) } } @@ -2798,15 +2888,43 @@ func (f *TokenType) Type() string { return "TokenType" } +// Details required to update a setting. +type UpdateAccountIpAccessEnableRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting AccountIpAccessEnable `json:"setting"` +} + // Details required to update a setting. type UpdateAibiDashboardEmbeddingAccessPolicySettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. 
To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting AibiDashboardEmbeddingAccessPolicySetting `json:"setting"` @@ -2817,10 +2935,17 @@ type UpdateAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. 
It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting AibiDashboardEmbeddingApprovedDomainsSetting `json:"setting"` @@ -2831,10 +2956,17 @@ type UpdateAutomaticClusterUpdateSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting AutomaticClusterUpdateSetting `json:"setting"` @@ -2845,10 +2977,17 @@ type UpdateComplianceSecurityProfileSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. 
To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting ComplianceSecurityProfileSetting `json:"setting"` @@ -2859,10 +2998,17 @@ type UpdateCspEnablementAccountSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. 
It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting CspEnablementAccountSetting `json:"setting"` @@ -2873,10 +3019,17 @@ type UpdateDefaultNamespaceSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` // This represents the setting configuration for the default namespace in // the Databricks workspace. Setting the default catalog for the workspace @@ -2895,10 +3048,17 @@ type UpdateDisableLegacyAccessRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. 
The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting DisableLegacyAccess `json:"setting"` @@ -2909,10 +3069,17 @@ type UpdateDisableLegacyDbfsRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. 
It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting DisableLegacyDbfs `json:"setting"` @@ -2923,10 +3090,17 @@ type UpdateDisableLegacyFeaturesRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting DisableLegacyFeatures `json:"setting"` @@ -2937,10 +3111,17 @@ type UpdateEnhancedSecurityMonitoringSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. 
To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting EnhancedSecurityMonitoringSetting `json:"setting"` @@ -2951,10 +3132,17 @@ type UpdateEsmEnablementAccountSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. 
It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting EsmEnablementAccountSetting `json:"setting"` @@ -3014,10 +3202,17 @@ type UpdatePersonalComputeSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting PersonalComputeSetting `json:"setting"` @@ -3031,10 +3226,17 @@ type UpdateRestrictWorkspaceAdminsSettingRequest struct { // This should always be set to true for Settings API. Added for AIP // compliance. AllowMissing bool `json:"allow_missing"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. 
To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. FieldMask string `json:"field_mask"` Setting RestrictWorkspaceAdminsSetting `json:"setting"` diff --git a/service/sql/api.go b/service/sql/api.go index 784b33de4..e3a899a35 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Alerts, Alerts Legacy, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Statement Execution, Warehouses, etc. +// These APIs allow you to manage Alerts, Alerts Legacy, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Redash Config, Statement Execution, Warehouses, etc. package sql import ( @@ -1627,6 +1627,25 @@ func (a *QueryVisualizationsLegacyAPI) DeleteById(ctx context.Context, id string }) } +type RedashConfigInterface interface { + + // Read workspace configuration for Redash-v2. 
+ GetConfig(ctx context.Context) (*ClientConfig, error) +} + +func NewRedashConfig(client *client.DatabricksClient) *RedashConfigAPI { + return &RedashConfigAPI{ + redashConfigImpl: redashConfigImpl{ + client: client, + }, + } +} + +// Redash V2 service for workspace configurations (internal) +type RedashConfigAPI struct { + redashConfigImpl +} + type StatementExecutionInterface interface { statementExecutionAPIUtilities diff --git a/service/sql/impl.go b/service/sql/impl.go index 5ab751c15..6dd38e9c9 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -503,6 +503,21 @@ func (a *queryVisualizationsLegacyImpl) Update(ctx context.Context, request Lega return &legacyVisualization, err } +// unexported type that holds implementations of just RedashConfig API methods +type redashConfigImpl struct { + client *client.DatabricksClient +} + +func (a *redashConfigImpl) GetConfig(ctx context.Context) (*ClientConfig, error) { + var clientConfig ClientConfig + path := "/api/2.0/redash-v2/config" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &clientConfig) + return &clientConfig, err +} + // unexported type that holds implementations of just StatementExecution API methods type statementExecutionImpl struct { client *client.DatabricksClient diff --git a/service/sql/interface.go b/service/sql/interface.go index 7e1d2838f..52301cdeb 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -479,6 +479,13 @@ type QueryVisualizationsLegacyService interface { Update(ctx context.Context, request LegacyVisualization) (*LegacyVisualization, error) } +// Redash V2 service for workspace configurations (internal) +type RedashConfigService interface { + + // Read workspace configuration for Redash-v2. 
+ GetConfig(ctx context.Context) (*ClientConfig, error) +} + // The Databricks SQL Statement Execution API can be used to execute SQL // statements on a SQL warehouse and fetch the result. // diff --git a/service/sql/model.go b/service/sql/model.go index bf307030c..f74768bba 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -423,6 +423,38 @@ func (f *ChannelName) Type() string { return "ChannelName" } +type ClientConfig struct { + AllowCustomJsVisualizations bool `json:"allow_custom_js_visualizations,omitempty"` + + AllowDownloads bool `json:"allow_downloads,omitempty"` + + AllowExternalShares bool `json:"allow_external_shares,omitempty"` + + AllowSubscriptions bool `json:"allow_subscriptions,omitempty"` + + DateFormat string `json:"date_format,omitempty"` + + DateTimeFormat string `json:"date_time_format,omitempty"` + + DisablePublish bool `json:"disable_publish,omitempty"` + + EnableLegacyAutodetectTypes bool `json:"enable_legacy_autodetect_types,omitempty"` + + FeatureShowPermissionsControl bool `json:"feature_show_permissions_control,omitempty"` + + HidePlotlyModeBar bool `json:"hide_plotly_mode_bar,omitempty"` + + ForceSendFields []string `json:"-"` +} + +func (s *ClientConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ClientConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type ColumnInfo struct { // The name of the column. Name string `json:"name,omitempty"` @@ -4323,10 +4355,17 @@ type UpdateAlertRequest struct { Alert *UpdateAlertRequestAlert `json:"alert,omitempty"` Id string `json:"-" url:"-"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). 
+ // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. UpdateMask string `json:"update_mask"` } @@ -4373,10 +4412,17 @@ type UpdateQueryRequest struct { Id string `json:"-" url:"-"` Query *UpdateQueryRequestQuery `json:"query,omitempty"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. 
UpdateMask string `json:"update_mask"` } @@ -4422,10 +4468,17 @@ type UpdateResponse struct { type UpdateVisualizationRequest struct { Id string `json:"-" url:"-"` - // Field mask is required to be passed into the PATCH request. Field mask - // specifies which fields of the setting payload will be updated. The field - // mask needs to be supplied as single string. To specify multiple fields in - // the field mask, use comma as the separator (no space). + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. UpdateMask string `json:"update_mask"` Visualization *UpdateVisualizationRequestVisualization `json:"visualization,omitempty"` diff --git a/version/version.go b/version/version.go index 61aa3beb4..47e4ca507 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.56.1" +const Version = "0.57.0" diff --git a/workspace_client.go b/workspace_client.go index b129ce30d..1c34bd372 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -443,6 +443,10 @@ type WorkspaceClient struct { // (import, export, get-status, list, delete). Lakeview dashboards.LakeviewInterface + // Token-based Lakeview APIs for embedding dashboards in external + // applications. 
+ LakeviewEmbedded dashboards.LakeviewEmbeddedInterface + // The Libraries API allows you to install and uninstall libraries and get // the status of libraries on a cluster. // @@ -687,6 +691,9 @@ type WorkspaceClient struct { // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html QueriesLegacy sql.QueriesLegacyInterface + // Query execution APIs for AI / BI Dashboards + QueryExecution dashboards.QueryExecutionInterface + // A service responsible for storing and retrieving the list of queries run // against SQL endpoints and serverless compute. QueryHistory sql.QueryHistoryInterface @@ -737,6 +744,9 @@ type WorkspaceClient struct { // shared data. This sharing mode is called **open sharing**. Recipients sharing.RecipientsInterface + // Redash V2 service for workspace configurations (internal) + RedashConfig sql.RedashConfigInterface + // Databricks provides a hosted version of MLflow Model Registry in Unity // Catalog. Models in Unity Catalog provide centralized access control, // auditing, lineage, and discovery of ML models across Databricks @@ -1199,6 +1209,7 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { IpAccessLists: settings.NewIpAccessLists(databricksClient), Jobs: jobs.NewJobs(databricksClient), Lakeview: dashboards.NewLakeview(databricksClient), + LakeviewEmbedded: dashboards.NewLakeviewEmbedded(databricksClient), Libraries: compute.NewLibraries(databricksClient), Metastores: catalog.NewMetastores(databricksClient), ModelRegistry: ml.NewModelRegistry(databricksClient), @@ -1222,11 +1233,13 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { QualityMonitors: catalog.NewQualityMonitors(databricksClient), Queries: sql.NewQueries(databricksClient), QueriesLegacy: sql.NewQueriesLegacy(databricksClient), + QueryExecution: dashboards.NewQueryExecution(databricksClient), QueryHistory: sql.NewQueryHistory(databricksClient), QueryVisualizations: sql.NewQueryVisualizations(databricksClient), 
QueryVisualizationsLegacy: sql.NewQueryVisualizationsLegacy(databricksClient), RecipientActivation: sharing.NewRecipientActivation(databricksClient), Recipients: sharing.NewRecipients(databricksClient), + RedashConfig: sql.NewRedashConfig(databricksClient), RegisteredModels: catalog.NewRegisteredModels(databricksClient), Repos: workspace.NewRepos(databricksClient), ResourceQuotas: catalog.NewResourceQuotas(databricksClient), From 3aebd68bf334b94e63974963cd967f836b559a48 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Thu, 6 Feb 2025 15:37:08 +0100 Subject: [PATCH 11/54] [Internal] Introduce new TokenSource interface that takes a `context.Context` (#1141) ## What changes are proposed in this pull request? This PR adds a new `TokenSource` interface which is essentially the same as `oauth2.TokenSource` but takes a `context.Context` as input. The rationale behind this interface is that many of the SDK `TokenSource` end-up making network calls under the hood. We want these to be done passing the context of the calling method. This interface is meant to ultimately entirely replace the use of `oauth2.TokenSource` in the SDK. ## How is this tested? Simple unit tests. 
--- config/experimental/auth/auth.go | 32 ++++++++++------- config/experimental/auth/auth_test.go | 5 +-- config/experimental/auth/authconv/authconv.go | 34 +++++++++++++++++++ .../auth/authconv/authconv_test.go | 31 +++++++++++++++++ config/oauth_visitors.go | 19 +++++++---- 5 files changed, 100 insertions(+), 21 deletions(-) create mode 100644 config/experimental/auth/authconv/authconv.go create mode 100644 config/experimental/auth/authconv/authconv_test.go diff --git a/config/experimental/auth/auth.go b/config/experimental/auth/auth.go index 2f560498b..abea1aefd 100644 --- a/config/experimental/auth/auth.go +++ b/config/experimental/auth/auth.go @@ -5,6 +5,7 @@ package auth import ( + "context" "sync" "time" @@ -21,6 +22,13 @@ const ( defaultDisableAsyncRefresh = true ) +// A TokenSource is anything that can return a token. +type TokenSource interface { + // Token returns a token or an error. Token must be safe for concurrent use + // by multiple goroutines. The returned Token must not be modified. + Token(context.Context) (*oauth2.Token, error) +} + type Option func(*cachedTokenSource) // WithCachedToken sets the initial token to be used by a cached token source. @@ -50,7 +58,7 @@ func WithAsyncRefresh(b bool) Option { // // If the TokenSource is already a cached token source (obtained by calling this // function), it is returned as is. -func NewCachedTokenSource(ts oauth2.TokenSource, opts ...Option) oauth2.TokenSource { +func NewCachedTokenSource(ts TokenSource, opts ...Option) TokenSource { // This is meant as a niche optimization to avoid double caching of the // token source in situations where the user calls needs caching guarantees // but does not know if the token source is already cached. @@ -75,7 +83,7 @@ func NewCachedTokenSource(ts oauth2.TokenSource, opts ...Option) oauth2.TokenSou type cachedTokenSource struct { // The token source to obtain tokens from. 
- tokenSource oauth2.TokenSource + tokenSource TokenSource // If true, only refresh the token with a blocking call when it is expired. disableAsync bool @@ -102,11 +110,11 @@ type cachedTokenSource struct { // Token returns a token from the cache or fetches a new one if the current // token is expired. -func (cts *cachedTokenSource) Token() (*oauth2.Token, error) { +func (cts *cachedTokenSource) Token(ctx context.Context) (*oauth2.Token, error) { if cts.disableAsync { - return cts.blockingToken() + return cts.blockingToken(ctx) } - return cts.asyncToken() + return cts.asyncToken(ctx) } // tokenState represents the state of the token. Each token can be in one of @@ -145,7 +153,7 @@ func (c *cachedTokenSource) tokenState() tokenState { } } -func (cts *cachedTokenSource) asyncToken() (*oauth2.Token, error) { +func (cts *cachedTokenSource) asyncToken(ctx context.Context) (*oauth2.Token, error) { cts.mu.Lock() ts := cts.tokenState() t := cts.cachedToken @@ -155,14 +163,14 @@ func (cts *cachedTokenSource) asyncToken() (*oauth2.Token, error) { case fresh: return t, nil case stale: - cts.triggerAsyncRefresh() + cts.triggerAsyncRefresh(ctx) return t, nil default: // expired - return cts.blockingToken() + return cts.blockingToken(ctx) } } -func (cts *cachedTokenSource) blockingToken() (*oauth2.Token, error) { +func (cts *cachedTokenSource) blockingToken(ctx context.Context) (*oauth2.Token, error) { cts.mu.Lock() // The lock is kept for the entire operation to ensure that only one @@ -182,7 +190,7 @@ func (cts *cachedTokenSource) blockingToken() (*oauth2.Token, error) { return cts.cachedToken, nil } - t, err := cts.tokenSource.Token() + t, err := cts.tokenSource.Token(ctx) if err != nil { return nil, err } @@ -190,14 +198,14 @@ func (cts *cachedTokenSource) blockingToken() (*oauth2.Token, error) { return t, nil } -func (cts *cachedTokenSource) triggerAsyncRefresh() { +func (cts *cachedTokenSource) triggerAsyncRefresh(ctx context.Context) { cts.mu.Lock() defer cts.mu.Unlock() 
if !cts.isRefreshing && cts.refreshErr == nil { cts.isRefreshing = true go func() { - t, err := cts.tokenSource.Token() + t, err := cts.tokenSource.Token(ctx) cts.mu.Lock() defer cts.mu.Unlock() diff --git a/config/experimental/auth/auth_test.go b/config/experimental/auth/auth_test.go index 035ebe42d..24a0d13bf 100644 --- a/config/experimental/auth/auth_test.go +++ b/config/experimental/auth/auth_test.go @@ -1,6 +1,7 @@ package auth import ( + "context" "fmt" "reflect" "sync" @@ -13,7 +14,7 @@ import ( type mockTokenSource func() (*oauth2.Token, error) -func (m mockTokenSource) Token() (*oauth2.Token, error) { +func (m mockTokenSource) Token(_ context.Context) (*oauth2.Token, error) { return m() } @@ -258,7 +259,7 @@ func TestCachedTokenSource_Token(t *testing.T) { wg.Add(1) go func() { defer wg.Done() - cts.Token() + cts.Token(context.Background()) }() } diff --git a/config/experimental/auth/authconv/authconv.go b/config/experimental/auth/authconv/authconv.go new file mode 100644 index 000000000..0f8a1b5d3 --- /dev/null +++ b/config/experimental/auth/authconv/authconv.go @@ -0,0 +1,34 @@ +package authconv + +import ( + "context" + + "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "golang.org/x/oauth2" +) + +// AuthTokenSource converts an oauth2.TokenSource to an auth.TokenSource. +func AuthTokenSource(ts oauth2.TokenSource) auth.TokenSource { + return &authTokenSource{ts: ts} +} + +type authTokenSource struct { + ts oauth2.TokenSource +} + +func (t *authTokenSource) Token(_ context.Context) (*oauth2.Token, error) { + return t.ts.Token() +} + +// OAuth2TokenSource converts an auth.TokenSource to an oauth2.TokenSource. 
+func OAuth2TokenSource(ts auth.TokenSource) oauth2.TokenSource { + return &oauth2TokenSource{ts: ts} +} + +type oauth2TokenSource struct { + ts auth.TokenSource +} + +func (t *oauth2TokenSource) Token() (*oauth2.Token, error) { + return t.ts.Token(context.Background()) +} diff --git a/config/experimental/auth/authconv/authconv_test.go b/config/experimental/auth/authconv/authconv_test.go new file mode 100644 index 000000000..5ef223b38 --- /dev/null +++ b/config/experimental/auth/authconv/authconv_test.go @@ -0,0 +1,31 @@ +package authconv + +import ( + "fmt" + "testing" + + "golang.org/x/oauth2" +) + +type mockOauth2TokenSource func() (*oauth2.Token, error) + +func (t mockOauth2TokenSource) Token() (*oauth2.Token, error) { + return t() +} + +func TestIndepotency(t *testing.T) { + wantErr := fmt.Errorf("test error") + wantToken := &oauth2.Token{AccessToken: "test token"} + ts := mockOauth2TokenSource(func() (*oauth2.Token, error) { + return wantToken, wantErr + }) + + gotToken, gotErr := OAuth2TokenSource(AuthTokenSource(ts)).Token() + + if gotErr != wantErr { + t.Errorf("Token() = %v, want %v", gotErr, wantErr) + } + if gotToken != wantToken { + t.Errorf("Token() = %v, want %v", gotToken, wantToken) + } +} diff --git a/config/oauth_visitors.go b/config/oauth_visitors.go index e9d3277c2..fc7a3d153 100644 --- a/config/oauth_visitors.go +++ b/config/oauth_visitors.go @@ -1,11 +1,13 @@ package config import ( + "context" "fmt" "net/http" "time" "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/authconv" "golang.org/x/oauth2" ) @@ -13,16 +15,16 @@ import ( // to the token from the auth token sourcevand the provided secondary header to // the token from the secondary token source. 
func serviceToServiceVisitor(primary, secondary oauth2.TokenSource, secondaryHeader string) func(r *http.Request) error { - refreshableAuth := auth.NewCachedTokenSource(primary) - refreshableSecondary := auth.NewCachedTokenSource(secondary) + refreshableAuth := auth.NewCachedTokenSource(authconv.AuthTokenSource(primary)) + refreshableSecondary := auth.NewCachedTokenSource(authconv.AuthTokenSource(secondary)) return func(r *http.Request) error { - inner, err := refreshableAuth.Token() + inner, err := refreshableAuth.Token(context.Background()) if err != nil { return fmt.Errorf("inner token: %w", err) } inner.SetAuthHeader(r) - cloud, err := refreshableSecondary.Token() + cloud, err := refreshableSecondary.Token(context.Background()) if err != nil { return fmt.Errorf("cloud token: %w", err) } @@ -33,9 +35,9 @@ func serviceToServiceVisitor(primary, secondary oauth2.TokenSource, secondaryHea // The same as serviceToServiceVisitor, but without a secondary token source. func refreshableVisitor(inner oauth2.TokenSource) func(r *http.Request) error { - cts := auth.NewCachedTokenSource(inner) + cts := auth.NewCachedTokenSource(authconv.AuthTokenSource(inner)) return func(r *http.Request) error { - inner, err := cts.Token() + inner, err := cts.Token(context.Background()) if err != nil { return fmt.Errorf("inner token: %w", err) } @@ -63,7 +65,10 @@ func azureReuseTokenSource(t *oauth2.Token, ts oauth2.TokenSource) oauth2.TokenS return t }) - return auth.NewCachedTokenSource(early, auth.WithCachedToken(t)) + return authconv.OAuth2TokenSource(auth.NewCachedTokenSource( + authconv.AuthTokenSource(early), + auth.WithCachedToken(t), + )) } func wrap(ts oauth2.TokenSource, fn func(*oauth2.Token) *oauth2.Token) oauth2.TokenSource { From 815cace601ed08e11794d9c20e8c42e6af376f4a Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Fri, 7 Feb 2025 13:45:52 +0100 Subject: [PATCH 12/54] [Internal] Add support for asynchronous data plane token refreshes (#1142) MIME-Version: 1.0 
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

DO NOT MERGE: `api.go` and `impl.go` are here to help with review and will
be removed before merge. These are supposed to be added later on via code
generation.

## What changes are proposed in this pull request?

This PR is a step towards enabling asynchronous refreshes of data plane
tokens. The PR introduces a new interface `dataplane.EndpointTokenSource`
which returns data plane tokens for a given endpoint and authorization
detail. The default implementation relies on `auth.NewCachedTokenSource` to
wrap the underlying TokenSources.

Note: async refreshes are disabled at the moment and will be enabled in a
follow-up PR.

**About Config.GetTokenSource():**

My first attempt to implement `GetTokenSource()` returned an error if the
`TokenSource` could not be built (e.g. because of config initialization
errors). This didn't work because: 1. service constructors do not return
errors and thus had to panic; 2. service constructors are called
systematically as soon as the `WorkspaceClient` is created — sometimes with
a config that is not compatible with `GetTokenSource()`.

Returning a `TokenSource` provider instead of a `TokenSource` would have
solved the problem but did not feel right. The current solution (i.e.
return an ever-failing token source) is the cleanest solution I could think
of.

## How is this tested?

Complete unit test coverage of the new package.
--- config/config.go | 36 +++++--- config/experimental/auth/auth.go | 14 +++ config/experimental/auth/auth_test.go | 12 +-- .../experimental/auth/dataplane/dataplane.go | 87 +++++++++++++++++++ .../auth/dataplane/dataplane_test.go | 72 +++++++++++++++ 5 files changed, 202 insertions(+), 19 deletions(-) create mode 100644 config/experimental/auth/dataplane/dataplane.go create mode 100644 config/experimental/auth/dataplane/dataplane_test.go diff --git a/config/config.go b/config/config.go index b451d3ecc..b1c9cd75c 100644 --- a/config/config.go +++ b/config/config.go @@ -14,6 +14,8 @@ import ( "github.com/databricks/databricks-sdk-go/common" "github.com/databricks/databricks-sdk-go/common/environment" "github.com/databricks/databricks-sdk-go/config/credentials" + "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/authconv" "github.com/databricks/databricks-sdk-go/httpclient" "github.com/databricks/databricks-sdk-go/logger" "golang.org/x/oauth2" @@ -220,24 +222,38 @@ func (c *Config) Authenticate(r *http.Request) error { return c.credentialsProvider.SetHeaders(r) } -// Authenticate returns an OAuth token for the current configuration. -// It will return an error if the CredentialsStrategy does not support OAuth tokens. +// Authenticate returns an OAuth token for the current configuration. It will +// return an error if the CredentialsStrategy does not support OAuth tokens. +// +// Deprecated: Use GetTokenSource instead. func (c *Config) GetToken() (*oauth2.Token, error) { - err := c.EnsureResolved() - if err != nil { - return nil, err + ts := c.GetTokenSource() + return ts.Token(context.Background()) +} + +// GetTokenSource returns an OAuth token source for the current configuration. +// It will return an error if the CredentialsStrategy does not support OAuth +// tokens. 
+func (c *Config) GetTokenSource() auth.TokenSource { + if err := c.EnsureResolved(); err != nil { + return errorTokenSource(err) } - err = c.authenticateIfNeeded() - if err != nil { - return nil, err + if err := c.authenticateIfNeeded(); err != nil { + return errorTokenSource(err) } if h, ok := c.credentialsProvider.(credentials.OAuthCredentialsProvider); ok { - return h.Token() + return authconv.AuthTokenSource(h) } else { - return nil, fmt.Errorf("OAuth Token not supported for current auth type %s", c.AuthType) + return errorTokenSource(fmt.Errorf("OAuth Token not supported for current auth type %s", c.AuthType)) } } +func errorTokenSource(err error) auth.TokenSource { + return auth.TokenSourceFn(func(context.Context) (*oauth2.Token, error) { + return nil, err + }) +} + // IsAzure returns if the client is configured for Azure Databricks. func (c *Config) IsAzure() bool { if c.AzureResourceID != "" { diff --git a/config/experimental/auth/auth.go b/config/experimental/auth/auth.go index abea1aefd..e0320e30a 100644 --- a/config/experimental/auth/auth.go +++ b/config/experimental/auth/auth.go @@ -29,6 +29,20 @@ type TokenSource interface { Token(context.Context) (*oauth2.Token, error) } +// TokenSourceFn is an adapter to allow the use of ordinary functions as +// TokenSource. +// +// Example: +// +// ts := TokenSourceFn(func(ctx context.Context) (*oauth2.Token, error) { +// return &oauth2.Token{}, nil +// }) +type TokenSourceFn func(context.Context) (*oauth2.Token, error) + +func (fn TokenSourceFn) Token(ctx context.Context) (*oauth2.Token, error) { + return fn(ctx) +} + type Option func(*cachedTokenSource) // WithCachedToken sets the initial token to be used by a cached token source. 
diff --git a/config/experimental/auth/auth_test.go b/config/experimental/auth/auth_test.go index 24a0d13bf..96197299a 100644 --- a/config/experimental/auth/auth_test.go +++ b/config/experimental/auth/auth_test.go @@ -12,12 +12,6 @@ import ( "golang.org/x/oauth2" ) -type mockTokenSource func() (*oauth2.Token, error) - -func (m mockTokenSource) Token(_ context.Context) (*oauth2.Token, error) { - return m() -} - func TestNewCachedTokenSource_noCaching(t *testing.T) { want := &cachedTokenSource{} got := NewCachedTokenSource(want, nil) @@ -27,7 +21,7 @@ func TestNewCachedTokenSource_noCaching(t *testing.T) { } func TestNewCachedTokenSource_default(t *testing.T) { - ts := mockTokenSource(func() (*oauth2.Token, error) { + ts := TokenSourceFn(func(_ context.Context) (*oauth2.Token, error) { return nil, nil }) @@ -48,7 +42,7 @@ func TestNewCachedTokenSource_default(t *testing.T) { } func TestNewCachedTokenSource_options(t *testing.T) { - ts := mockTokenSource(func() (*oauth2.Token, error) { + ts := TokenSourceFn(func(_ context.Context) (*oauth2.Token, error) { return nil, nil }) @@ -248,7 +242,7 @@ func TestCachedTokenSource_Token(t *testing.T) { staleDuration: 10 * time.Minute, cachedToken: tc.cachedToken, timeNow: func() time.Time { return now }, - tokenSource: mockTokenSource(func() (*oauth2.Token, error) { + tokenSource: TokenSourceFn(func(_ context.Context) (*oauth2.Token, error) { atomic.AddInt32(&gotCalls, 1) return tc.returnedToken, tc.returnedError }), diff --git a/config/experimental/auth/dataplane/dataplane.go b/config/experimental/auth/dataplane/dataplane.go new file mode 100644 index 000000000..473032cf8 --- /dev/null +++ b/config/experimental/auth/dataplane/dataplane.go @@ -0,0 +1,87 @@ +// Package dataplane is an experimental package that provides a token source to +// directly access Databricks data plane. 
+package dataplane + +import ( + "context" + "sync" + + "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "golang.org/x/oauth2" +) + +// OAuthClient is an interface for Databricks OAuth client. +type OAuthClient interface { + GetOAuthToken(ctx context.Context, authDetails string, t *oauth2.Token) (*oauth2.Token, error) +} + +// EndpointTokenSource is anything that returns tokens given a data plane +// endpoint and authentication details. +type EndpointTokenSource interface { + Token(ctx context.Context, endpoint string, authDetails string) (*oauth2.Token, error) +} + +// NewEndpointTokenSource returns a new EndpointTokenSource that uses the given +// OAuthClient and control plane TokenSource. +func NewEndpointTokenSource(c OAuthClient, cpts auth.TokenSource) *dataPlaneTokenSource { + return &dataPlaneTokenSource{ + client: c, + cpts: auth.NewCachedTokenSource( + cpts, + auth.WithAsyncRefresh(false), // TODO: Enable async refreshes once the feature is stable. + ), + } +} + +type tsKey struct { + endpoint string + authDetails string +} + +// dataPlaneTokenSource implements the EndpointTokenSource interface. +// +// For a given endpoint and authentication details, it uses the control plane +// TokenSource to retrieve the control plane token, that is then used to +// retrieve the data plane token through the OAuthClient. +// +// Each token source is cached to avoid unnecessary token requests. +type dataPlaneTokenSource struct { + client OAuthClient + cpts auth.TokenSource + sources sync.Map +} + +// Token returns a token for the given endpoint and authentication details. 
+func (dpts *dataPlaneTokenSource) Token(ctx context.Context, endpoint string, authDetails string) (*oauth2.Token, error) { + key := tsKey{endpoint: endpoint, authDetails: authDetails} + + if a, ok := dpts.sources.Load(key); ok { // happy path + return a.(auth.TokenSource).Token(ctx) + } + + ts := auth.NewCachedTokenSource( + &tokenSource{ + client: dpts.client, + cpts: dpts.cpts, + authDetails: authDetails, + }, + auth.WithAsyncRefresh(false), // TODO: Enable async refresh once the feature is stable. + ) + dpts.sources.Store(key, ts) + + return ts.Token(ctx) +} + +type tokenSource struct { + client OAuthClient + cpts auth.TokenSource + authDetails string +} + +func (dpts *tokenSource) Token(ctx context.Context) (*oauth2.Token, error) { + innerToken, err := dpts.cpts.Token(ctx) + if err != nil { + return nil, err + } + return dpts.client.GetOAuthToken(ctx, dpts.authDetails, innerToken) +} diff --git a/config/experimental/auth/dataplane/dataplane_test.go b/config/experimental/auth/dataplane/dataplane_test.go new file mode 100644 index 000000000..d02e61d06 --- /dev/null +++ b/config/experimental/auth/dataplane/dataplane_test.go @@ -0,0 +1,72 @@ +package dataplane + +import ( + "context" + "fmt" + "testing" + + "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "golang.org/x/oauth2" +) + +type mockClient func(context.Context, string, *oauth2.Token) (*oauth2.Token, error) + +func (m mockClient) GetOAuthToken(ctx context.Context, authDetails string, t *oauth2.Token) (*oauth2.Token, error) { + return m(ctx, authDetails, t) +} + +func TestDataPlaneTokenSource_Token(t *testing.T) { + testErr := fmt.Errorf("test error") + testToken := &oauth2.Token{AccessToken: "access token"} + + testCases := []struct { + desc string + apiClient OAuthClient + cpts auth.TokenSource + wantToken *oauth2.Token + wantErr error + }{ + { + desc: "Failing control plane token source", + cpts: auth.TokenSourceFn(func(context.Context) (*oauth2.Token, error) { + return testToken, 
testErr + }), + wantErr: testErr, + }, + { + desc: "Failing oauth endpoint", + cpts: auth.TokenSourceFn(func(context.Context) (*oauth2.Token, error) { + return testToken, nil + }), + apiClient: mockClient(func(context.Context, string, *oauth2.Token) (*oauth2.Token, error) { + return nil, testErr + }), + wantErr: testErr, + }, + { + desc: "Successful token retrieval", + cpts: auth.TokenSourceFn(func(context.Context) (*oauth2.Token, error) { + return &oauth2.Token{AccessToken: "control plane test token"}, nil + }), + apiClient: mockClient(func(context.Context, string, *oauth2.Token) (*oauth2.Token, error) { + return testToken, nil + }), + wantToken: testToken, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + dpts := NewEndpointTokenSource(tc.apiClient, tc.cpts) + + gotToken, gotErr := dpts.Token(context.Background(), "endpoint", "authDetails") + + if gotErr != tc.wantErr { + t.Errorf("Token(): got error %v, want %v", gotErr, tc.wantErr) + } + if gotToken != tc.wantToken { + t.Errorf("Token(): got token %v, want %v", gotToken, tc.wantToken) + } + }) + } +} From 8307a4d467368f6a4290cba179a334d1f816ebd6 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Fri, 7 Feb 2025 17:37:22 +0100 Subject: [PATCH 13/54] [Feature] Enable async refreshes for OAuth tokens (#1143) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit ## What changes are proposed in this pull request? This PR enables async refreshes for cached token sources. This PR also: - Fixes a race condition bug in the unit tests by adding a small delay to the test refresh. - Remove one test in AzureCLI that is not correct anymore due to how the code is structured. Refactoring this to guarantee that the test pass is not trivial and ultimately out of the scope of the new system — as the cached token source is the one providing the guarantee. ## How is this tested? Fix unit tests. 
--- config/auth_azure_cli_test.go | 25 ------------------------- config/experimental/auth/auth.go | 12 +++--------- config/experimental/auth/auth_test.go | 9 +++++---- 3 files changed, 8 insertions(+), 38 deletions(-) diff --git a/config/auth_azure_cli_test.go b/config/auth_azure_cli_test.go index c2c8f7422..af02e97c4 100644 --- a/config/auth_azure_cli_test.go +++ b/config/auth_azure_cli_test.go @@ -118,31 +118,6 @@ func TestAzureCliCredentials_Valid(t *testing.T) { assert.Equal(t, "...", r.Header.Get("X-Databricks-Azure-SP-Management-Token")) } -func TestAzureCliCredentials_ReuseTokens(t *testing.T) { - env.CleanupEnvironment(t) - os.Setenv("PATH", testdataPath()) - os.Setenv("EXPIRE", "10M") - - // Use temporary file to store the number of calls to the AZ CLI. - tmp := t.TempDir() - count := filepath.Join(tmp, "count") - os.Setenv("COUNT", count) - - aa := AzureCliCredentials{} - visitor, err := aa.Configure(context.Background(), azDummy) - assert.NoError(t, err) - - r := &http.Request{Header: http.Header{}} - err = visitor.SetHeaders(r) - assert.NoError(t, err) - - // We verify the headers in the test above. - // This test validates we do not call the AZ CLI more than we need. - buf, err := os.ReadFile(count) - require.NoError(t, err) - assert.Len(t, buf, 2, "Expected the AZ CLI to be called twice") -} - func TestAzureCliCredentials_ValidNoManagementAccess(t *testing.T) { env.CleanupEnvironment(t) os.Setenv("PATH", testdataPath()) diff --git a/config/experimental/auth/auth.go b/config/experimental/auth/auth.go index e0320e30a..0a73be62c 100644 --- a/config/experimental/auth/auth.go +++ b/config/experimental/auth/auth.go @@ -16,10 +16,6 @@ const ( // Default duration for the stale period. The number as been set arbitrarily // and might be changed in the future. defaultStaleDuration = 3 * time.Minute - - // Disable the asynchronous token refresh by default. This is meant to - // change in the future once the feature is stable. 
- defaultDisableAsyncRefresh = true ) // A TokenSource is anything that can return a token. @@ -59,9 +55,9 @@ func WithAsyncRefresh(b bool) Option { } } -// NewCachedTokenProvider wraps a [oauth2.TokenSource] to cache the tokens -// it returns. By default, the cache will refresh tokens asynchronously a few -// minutes before they expire. +// NewCachedTokenProvider wraps a [TokenSource] to cache the tokens it returns. +// By default, the cache will refresh tokens asynchronously a few minutes before +// they expire. // // The token cache is safe for concurrent use by multiple goroutines and will // guarantee that only one token refresh is triggered at a time. @@ -83,8 +79,6 @@ func NewCachedTokenSource(ts TokenSource, opts ...Option) TokenSource { cts := &cachedTokenSource{ tokenSource: ts, staleDuration: defaultStaleDuration, - disableAsync: defaultDisableAsyncRefresh, - cachedToken: nil, timeNow: time.Now, } diff --git a/config/experimental/auth/auth_test.go b/config/experimental/auth/auth_test.go index 96197299a..9fbaefa4f 100644 --- a/config/experimental/auth/auth_test.go +++ b/config/experimental/auth/auth_test.go @@ -33,8 +33,8 @@ func TestNewCachedTokenSource_default(t *testing.T) { if got.staleDuration != defaultStaleDuration { t.Errorf("NewCachedTokenSource() staleDuration = %v, want %v", got.staleDuration, defaultStaleDuration) } - if got.disableAsync != defaultDisableAsyncRefresh { - t.Errorf("NewCachedTokenSource() disableAsync = %v, want %v", got.disableAsync, defaultDisableAsyncRefresh) + if got.disableAsync != false { + t.Errorf("NewCachedTokenSource() disableAsync = %v, want %v", got.disableAsync, false) } if got.cachedToken != nil { t.Errorf("NewCachedTokenSource() cachedToken = %v, want nil", got.cachedToken) @@ -221,7 +221,7 @@ func TestCachedTokenSource_Token(t *testing.T) { desc: "[Async] stale cached token, expired token returned", cachedToken: &oauth2.Token{Expiry: now.Add(1 * time.Minute)}, returnedToken: &oauth2.Token{Expiry: now.Add(-1 * 
time.Second)}, - wantCalls: 10, + wantCalls: 1, wantToken: &oauth2.Token{Expiry: now.Add(-1 * time.Second)}, }, { @@ -244,6 +244,7 @@ func TestCachedTokenSource_Token(t *testing.T) { timeNow: func() time.Time { return now }, tokenSource: TokenSourceFn(func(_ context.Context) (*oauth2.Token, error) { atomic.AddInt32(&gotCalls, 1) + time.Sleep(10 * time.Millisecond) return tc.returnedToken, tc.returnedError }), } @@ -262,7 +263,7 @@ func TestCachedTokenSource_Token(t *testing.T) { // Wait for async refreshes to finish. This part is a little brittle // but necessary to ensure that the async refresh is done before // checking the results. - time.Sleep(10 * time.Millisecond) + time.Sleep(20 * time.Millisecond) if int(gotCalls) != tc.wantCalls { t.Errorf("want %d calls to cts.tokenSource.Token(), got %d", tc.wantCalls, gotCalls) From 9dc3c56fb0afb65e8597f205db087fb2c6cca21d Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Tue, 11 Feb 2025 11:32:32 +0100 Subject: [PATCH 14/54] [Release] Release v0.58.0 (#1144) ### New Features and Improvements * Enable async refreshes for OAuth tokens ([#1143](https://github.com/databricks/databricks-sdk-go/pull/1143)). ### Internal Changes * Add support for asynchronous data plane token refreshes ([#1142](https://github.com/databricks/databricks-sdk-go/pull/1142)). * Introduce new TokenSource interface that takes a `context.Context` ([#1141](https://github.com/databricks/databricks-sdk-go/pull/1141)). ### API Changes: * Added `GetMessageQueryResultByAttachment` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. * Added `Id` field for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). * Added `LimitConfig` field for [billing.UpdateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#UpdateBudgetPolicyRequest). 
* Added `Volumes` field for [compute.ClusterLogConf](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#ClusterLogConf). * [Breaking] Removed `ReviewState`, `Reviews` and `RunnerCollaborators` fields for [cleanrooms.CleanRoomAssetNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetNotebook). OpenAPI SHA: 99f644e72261ef5ecf8d74db20f4b7a1e09723cc, Date: 2025-02-11 --- .codegen/_openapi_sha | 2 +- .gitattributes | 5 + CHANGELOG.md | 24 + .../dashboards/mock_genie_interface.go | 121 +++ service/apps/api.go | 80 +- service/apps/impl.go | 76 +- service/apps/model.go | 2 + service/billing/api.go | 115 --- service/billing/impl.go | 114 ++- service/billing/model.go | 8 + service/catalog/api.go | 895 +----------------- service/catalog/impl.go | 865 ++++++++++++++++- service/cleanrooms/api.go | 120 +-- service/cleanrooms/impl.go | 111 ++- service/cleanrooms/model.go | 56 -- service/compute/api.go | 401 +------- service/compute/impl.go | 381 +++++++- service/compute/model.go | 72 +- service/dashboards/api.go | 134 +-- service/dashboards/impl.go | 111 ++- service/dashboards/interface.go | 7 + service/dashboards/model.go | 18 +- service/files/api.go | 100 +- service/files/impl.go | 93 +- service/iam/api.go | 331 +------ service/iam/impl.go | 317 ++++++- service/jobs/api.go | 123 --- service/jobs/impl.go | 119 ++- service/marketplace/api.go | 648 +------------ service/marketplace/impl.go | 612 +++++++++++- service/ml/api.go | 445 --------- service/ml/impl.go | 424 ++++++++- service/oauth2/api.go | 239 +---- service/oauth2/impl.go | 224 ++++- service/pipelines/api.go | 82 +- service/pipelines/impl.go | 78 +- service/serving/api.go | 30 - service/serving/impl.go | 30 +- service/serving/model.go | 4 +- service/settings/api.go | 254 +---- service/settings/impl.go | 240 ++++- service/sharing/api.go | 172 +--- service/sharing/impl.go | 178 +++- service/sharing/interface.go | 2 +- service/sql/api.go | 278 +----- 
service/sql/impl.go | 266 +++++- service/vectorsearch/api.go | 74 -- service/vectorsearch/impl.go | 72 +- service/workspace/api.go | 248 +---- service/workspace/impl.go | 234 ++++- version/version.go | 2 +- 51 files changed, 4711 insertions(+), 4926 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 9a95107e8..562b72fcc 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -c72c58f97b950fcb924a90ef164bcb10cfcd5ece \ No newline at end of file +99f644e72261ef5ecf8d74db20f4b7a1e09723cc \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 1f04b9a10..d2da9a3e1 100644 --- a/.gitattributes +++ b/.gitattributes @@ -6,6 +6,7 @@ experimental/mocks/mock_account_client.go linguist-generated=true experimental/mocks/mock_workspace_client.go linguist-generated=true experimental/mocks/service/apps/mock_apps_interface.go linguist-generated=true experimental/mocks/service/billing/mock_billable_usage_interface.go linguist-generated=true +experimental/mocks/service/billing/mock_budget_policy_interface.go linguist-generated=true experimental/mocks/service/billing/mock_budgets_interface.go linguist-generated=true experimental/mocks/service/billing/mock_log_delivery_interface.go linguist-generated=true experimental/mocks/service/billing/mock_usage_dashboards_interface.go linguist-generated=true @@ -46,7 +47,9 @@ experimental/mocks/service/compute/mock_libraries_interface.go linguist-generate experimental/mocks/service/compute/mock_policy_compliance_for_clusters_interface.go linguist-generated=true experimental/mocks/service/compute/mock_policy_families_interface.go linguist-generated=true experimental/mocks/service/dashboards/mock_genie_interface.go linguist-generated=true +experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go linguist-generated=true experimental/mocks/service/dashboards/mock_lakeview_interface.go linguist-generated=true 
+experimental/mocks/service/dashboards/mock_query_execution_interface.go linguist-generated=true experimental/mocks/service/files/mock_dbfs_interface.go linguist-generated=true experimental/mocks/service/files/mock_files_interface.go linguist-generated=true experimental/mocks/service/iam/mock_access_control_interface.go linguist-generated=true @@ -106,6 +109,7 @@ experimental/mocks/service/settings/mock_default_namespace_interface.go linguist experimental/mocks/service/settings/mock_disable_legacy_access_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_features_interface.go linguist-generated=true +experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go linguist-generated=true experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go linguist-generated=true experimental/mocks/service/settings/mock_esm_enablement_account_interface.go linguist-generated=true experimental/mocks/service/settings/mock_ip_access_lists_interface.go linguist-generated=true @@ -132,6 +136,7 @@ experimental/mocks/service/sql/mock_queries_legacy_interface.go linguist-generat experimental/mocks/service/sql/mock_query_history_interface.go linguist-generated=true experimental/mocks/service/sql/mock_query_visualizations_interface.go linguist-generated=true experimental/mocks/service/sql/mock_query_visualizations_legacy_interface.go linguist-generated=true +experimental/mocks/service/sql/mock_redash_config_interface.go linguist-generated=true experimental/mocks/service/sql/mock_statement_execution_interface.go linguist-generated=true experimental/mocks/service/sql/mock_warehouses_interface.go linguist-generated=true experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go linguist-generated=true diff --git a/CHANGELOG.md b/CHANGELOG.md index 28fee5466..a2f2aaf25 100644 --- a/CHANGELOG.md 
+++ b/CHANGELOG.md @@ -1,5 +1,28 @@ # Version changelog +## [Release] Release v0.58.0 + +### New Features and Improvements + + * Enable async refreshes for OAuth tokens ([#1143](https://github.com/databricks/databricks-sdk-go/pull/1143)). + + +### Internal Changes + + * Add support for asynchronous data plane token refreshes ([#1142](https://github.com/databricks/databricks-sdk-go/pull/1142)). + * Introduce new TokenSource interface that takes a `context.Context` ([#1141](https://github.com/databricks/databricks-sdk-go/pull/1141)). + + +### API Changes: + + * Added `GetMessageQueryResultByAttachment` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. + * Added `Id` field for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). + * Added `LimitConfig` field for [billing.UpdateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#UpdateBudgetPolicyRequest). + * Added `Volumes` field for [compute.ClusterLogConf](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#ClusterLogConf). + * [Breaking] Removed `ReviewState`, `Reviews` and `RunnerCollaborators` fields for [cleanrooms.CleanRoomAssetNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetNotebook). 
+ +OpenAPI SHA: 99f644e72261ef5ecf8d74db20f4b7a1e09723cc, Date: 2025-02-11 + ## [Release] Release v0.57.0 ### New Features and Improvements diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go index e943c8b10..7a79503f6 100644 --- a/experimental/mocks/service/dashboards/mock_genie_interface.go +++ b/experimental/mocks/service/dashboards/mock_genie_interface.go @@ -397,6 +397,127 @@ func (_c *MockGenieInterface_GetMessageQueryResult_Call) RunAndReturn(run func(c return _c } +// GetMessageQueryResultByAttachment provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) GetMessageQueryResultByAttachment(ctx context.Context, request dashboards.GenieGetQueryResultByAttachmentRequest) (*dashboards.GenieGetMessageQueryResultResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetMessageQueryResultByAttachment") + } + + var r0 *dashboards.GenieGetMessageQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetQueryResultByAttachmentRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetQueryResultByAttachmentRequest) *dashboards.GenieGetMessageQueryResultResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieGetMessageQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieGetQueryResultByAttachmentRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetMessageQueryResultByAttachment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMessageQueryResultByAttachment' +type MockGenieInterface_GetMessageQueryResultByAttachment_Call 
struct { + *mock.Call +} + +// GetMessageQueryResultByAttachment is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GenieGetQueryResultByAttachmentRequest +func (_e *MockGenieInterface_Expecter) GetMessageQueryResultByAttachment(ctx interface{}, request interface{}) *MockGenieInterface_GetMessageQueryResultByAttachment_Call { + return &MockGenieInterface_GetMessageQueryResultByAttachment_Call{Call: _e.mock.On("GetMessageQueryResultByAttachment", ctx, request)} +} + +func (_c *MockGenieInterface_GetMessageQueryResultByAttachment_Call) Run(run func(ctx context.Context, request dashboards.GenieGetQueryResultByAttachmentRequest)) *MockGenieInterface_GetMessageQueryResultByAttachment_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieGetQueryResultByAttachmentRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_GetMessageQueryResultByAttachment_Call) Return(_a0 *dashboards.GenieGetMessageQueryResultResponse, _a1 error) *MockGenieInterface_GetMessageQueryResultByAttachment_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetMessageQueryResultByAttachment_Call) RunAndReturn(run func(context.Context, dashboards.GenieGetQueryResultByAttachmentRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)) *MockGenieInterface_GetMessageQueryResultByAttachment_Call { + _c.Call.Return(run) + return _c +} + +// GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId provides a mock function with given fields: ctx, spaceId, conversationId, messageId, attachmentId +func (_m *MockGenieInterface) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*dashboards.GenieGetMessageQueryResultResponse, error) { + ret := _m.Called(ctx, spaceId, conversationId, messageId, attachmentId) + + 
if len(ret) == 0 { + panic("no return value specified for GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId") + } + + var r0 *dashboards.GenieGetMessageQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) (*dashboards.GenieGetMessageQueryResultResponse, error)); ok { + return rf(ctx, spaceId, conversationId, messageId, attachmentId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *dashboards.GenieGetMessageQueryResultResponse); ok { + r0 = rf(ctx, spaceId, conversationId, messageId, attachmentId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieGetMessageQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, spaceId, conversationId, messageId, attachmentId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId' +type MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call struct { + *mock.Call +} + +// GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId is a helper method to define mock.On call +// - ctx context.Context +// - spaceId string +// - conversationId string +// - messageId string +// - attachmentId string +func (_e *MockGenieInterface_Expecter) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx interface{}, spaceId interface{}, conversationId interface{}, messageId interface{}, attachmentId interface{}) 
*MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + return &MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call{Call: _e.mock.On("GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId", ctx, spaceId, conversationId, messageId, attachmentId)} +} + +func (_c *MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call) Run(run func(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string)) *MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string)) + }) + return _c +} + +func (_c *MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call) Return(_a0 *dashboards.GenieGetMessageQueryResultResponse, _a1 error) *MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call) RunAndReturn(run func(context.Context, string, string, string, string) (*dashboards.GenieGetMessageQueryResultResponse, error)) *MockGenieInterface_GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + _c.Call.Return(run) + return _c +} + // GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId provides a mock function with given fields: ctx, spaceId, conversationId, messageId func (_m *MockGenieInterface) GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) 
(*dashboards.GenieGetMessageQueryResultResponse, error) { ret := _m.Called(ctx, spaceId, conversationId, messageId) diff --git a/service/apps/api.go b/service/apps/api.go index 59f7aa2ff..eda1d7935 100755 --- a/service/apps/api.go +++ b/service/apps/api.go @@ -515,89 +515,11 @@ func (a *AppsAPI) GetPermissionsByAppName(ctx context.Context, appName string) ( }) } -// List apps. -// -// Lists all apps in the workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AppsAPI) List(ctx context.Context, request ListAppsRequest) listing.Iterator[App] { - - getNextPage := func(ctx context.Context, req ListAppsRequest) (*ListAppsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.appsImpl.List(ctx, req) - } - getItems := func(resp *ListAppsResponse) []App { - return resp.Apps - } - getNextReq := func(resp *ListAppsResponse) *ListAppsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List apps. -// -// Lists all apps in the workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AppsAPI) ListAll(ctx context.Context, request ListAppsRequest) ([]App, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[App](ctx, iterator) -} - -// List app deployments. -// -// Lists all app deployments for the app with the supplied name. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *AppsAPI) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) listing.Iterator[AppDeployment] { - - getNextPage := func(ctx context.Context, req ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.appsImpl.ListDeployments(ctx, req) - } - getItems := func(resp *ListAppDeploymentsResponse) []AppDeployment { - return resp.AppDeployments - } - getNextReq := func(resp *ListAppDeploymentsResponse) *ListAppDeploymentsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List app deployments. -// -// Lists all app deployments for the app with the supplied name. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AppsAPI) ListDeploymentsAll(ctx context.Context, request ListAppDeploymentsRequest) ([]AppDeployment, error) { - iterator := a.ListDeployments(ctx, request) - return listing.ToSlice[AppDeployment](ctx, iterator) -} - // List app deployments. // // Lists all app deployments for the app with the supplied name. 
func (a *AppsAPI) ListDeploymentsByAppName(ctx context.Context, appName string) (*ListAppDeploymentsResponse, error) { - return a.appsImpl.ListDeployments(ctx, ListAppDeploymentsRequest{ + return a.appsImpl.internalListDeployments(ctx, ListAppDeploymentsRequest{ AppName: appName, }) } diff --git a/service/apps/impl.go b/service/apps/impl.go index b83aec396..ee0eaf22b 100755 --- a/service/apps/impl.go +++ b/service/apps/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" "golang.org/x/exp/slices" ) @@ -91,7 +93,42 @@ func (a *appsImpl) GetPermissions(ctx context.Context, request GetAppPermissions return &appPermissions, err } -func (a *appsImpl) List(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error) { +// List apps. +// +// Lists all apps in the workspace. +func (a *appsImpl) List(ctx context.Context, request ListAppsRequest) listing.Iterator[App] { + + getNextPage := func(ctx context.Context, req ListAppsRequest) (*ListAppsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAppsResponse) []App { + return resp.Apps + } + getNextReq := func(resp *ListAppsResponse) *ListAppsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List apps. +// +// Lists all apps in the workspace. 
+func (a *appsImpl) ListAll(ctx context.Context, request ListAppsRequest) ([]App, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[App](ctx, iterator) +} + +func (a *appsImpl) internalList(ctx context.Context, request ListAppsRequest) (*ListAppsResponse, error) { var listAppsResponse ListAppsResponse path := "/api/2.0/apps" queryParams := make(map[string]any) @@ -101,7 +138,42 @@ func (a *appsImpl) List(ctx context.Context, request ListAppsRequest) (*ListApps return &listAppsResponse, err } -func (a *appsImpl) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { +// List app deployments. +// +// Lists all app deployments for the app with the supplied name. +func (a *appsImpl) ListDeployments(ctx context.Context, request ListAppDeploymentsRequest) listing.Iterator[AppDeployment] { + + getNextPage := func(ctx context.Context, req ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListDeployments(ctx, req) + } + getItems := func(resp *ListAppDeploymentsResponse) []AppDeployment { + return resp.AppDeployments + } + getNextReq := func(resp *ListAppDeploymentsResponse) *ListAppDeploymentsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List app deployments. +// +// Lists all app deployments for the app with the supplied name. 
+func (a *appsImpl) ListDeploymentsAll(ctx context.Context, request ListAppDeploymentsRequest) ([]AppDeployment, error) { + iterator := a.ListDeployments(ctx, request) + return listing.ToSlice[AppDeployment](ctx, iterator) +} + +func (a *appsImpl) internalListDeployments(ctx context.Context, request ListAppDeploymentsRequest) (*ListAppDeploymentsResponse, error) { var listAppDeploymentsResponse ListAppDeploymentsResponse path := fmt.Sprintf("/api/2.0/apps/%v/deployments", request.AppName) queryParams := make(map[string]any) diff --git a/service/apps/model.go b/service/apps/model.go index 0bced9bce..26d7b2b0c 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -26,6 +26,8 @@ type App struct { DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` // The description of the app. Description string `json:"description,omitempty"` + // The unique identifier of the app. + Id string `json:"id,omitempty"` // The name of the app. The name must contain only lowercase alphanumeric // characters and hyphens. It must be unique within the workspace. Name string `json:"name"` diff --git a/service/billing/api.go b/service/billing/api.go index ae62bb149..8f44b347b 100755 --- a/service/billing/api.go +++ b/service/billing/api.go @@ -123,47 +123,6 @@ func (a *BudgetPolicyAPI) GetByPolicyId(ctx context.Context, policyId string) (* }) } -// List policies. -// -// Lists all policies. Policies are returned in the alphabetically ascending -// order of their names. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *BudgetPolicyAPI) List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] { - - getNextPage := func(ctx context.Context, req ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.budgetPolicyImpl.List(ctx, req) - } - getItems := func(resp *ListBudgetPoliciesResponse) []BudgetPolicy { - return resp.Policies - } - getNextReq := func(resp *ListBudgetPoliciesResponse) *ListBudgetPoliciesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List policies. -// -// Lists all policies. Policies are returned in the alphabetically ascending -// order of their names. -// -// This method is generated by Databricks SDK Code Generator. -func (a *BudgetPolicyAPI) ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[BudgetPolicy](ctx, iterator) -} - type BudgetsInterface interface { // Create new budget. @@ -253,45 +212,6 @@ func (a *BudgetsAPI) GetByBudgetId(ctx context.Context, budgetId string) (*GetBu }) } -// Get all budgets. -// -// Gets all budgets associated with this account. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *BudgetsAPI) List(ctx context.Context, request ListBudgetConfigurationsRequest) listing.Iterator[BudgetConfiguration] { - - getNextPage := func(ctx context.Context, req ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.budgetsImpl.List(ctx, req) - } - getItems := func(resp *ListBudgetConfigurationsResponse) []BudgetConfiguration { - return resp.Budgets - } - getNextReq := func(resp *ListBudgetConfigurationsResponse) *ListBudgetConfigurationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get all budgets. -// -// Gets all budgets associated with this account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *BudgetsAPI) ListAll(ctx context.Context, request ListBudgetConfigurationsRequest) ([]BudgetConfiguration, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[BudgetConfiguration](ctx, iterator) -} - type LogDeliveryInterface interface { // Create a new log delivery configuration. @@ -462,41 +382,6 @@ func (a *LogDeliveryAPI) GetByLogDeliveryConfigurationId(ctx context.Context, lo }) } -// Get all log delivery configurations. -// -// Gets all Databricks log delivery configurations associated with an account -// specified by ID. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *LogDeliveryAPI) List(ctx context.Context, request ListLogDeliveryRequest) listing.Iterator[LogDeliveryConfiguration] { - - getNextPage := func(ctx context.Context, req ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.logDeliveryImpl.List(ctx, req) - } - getItems := func(resp *WrappedLogDeliveryConfigurations) []LogDeliveryConfiguration { - return resp.LogDeliveryConfigurations - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get all log delivery configurations. -// -// Gets all Databricks log delivery configurations associated with an account -// specified by ID. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LogDeliveryAPI) ListAll(ctx context.Context, request ListLogDeliveryRequest) ([]LogDeliveryConfiguration, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[LogDeliveryConfiguration](ctx, iterator) -} - // LogDeliveryConfigurationConfigNameToConfigIdMap calls [LogDeliveryAPI.ListAll] and creates a map of results with [LogDeliveryConfiguration].ConfigName as key and [LogDeliveryConfiguration].ConfigId as value. // // Returns an error if there's more than one [LogDeliveryConfiguration] with the same .ConfigName. 
diff --git a/service/billing/impl.go b/service/billing/impl.go index 330e181ee..0ea95a456 100755 --- a/service/billing/impl.go +++ b/service/billing/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just BillableUsage API methods @@ -61,7 +63,44 @@ func (a *budgetPolicyImpl) Get(ctx context.Context, request GetBudgetPolicyReque return &budgetPolicy, err } -func (a *budgetPolicyImpl) List(ctx context.Context, request ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { +// List policies. +// +// Lists all policies. Policies are returned in the alphabetically ascending +// order of their names. +func (a *budgetPolicyImpl) List(ctx context.Context, request ListBudgetPoliciesRequest) listing.Iterator[BudgetPolicy] { + + getNextPage := func(ctx context.Context, req ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListBudgetPoliciesResponse) []BudgetPolicy { + return resp.Policies + } + getNextReq := func(resp *ListBudgetPoliciesResponse) *ListBudgetPoliciesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List policies. +// +// Lists all policies. Policies are returned in the alphabetically ascending +// order of their names. 
+func (a *budgetPolicyImpl) ListAll(ctx context.Context, request ListBudgetPoliciesRequest) ([]BudgetPolicy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[BudgetPolicy](ctx, iterator) +} + +func (a *budgetPolicyImpl) internalList(ctx context.Context, request ListBudgetPoliciesRequest) (*ListBudgetPoliciesResponse, error) { var listBudgetPoliciesResponse ListBudgetPoliciesResponse path := fmt.Sprintf("/api/2.1/accounts/%v/budget-policies", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -75,6 +114,9 @@ func (a *budgetPolicyImpl) Update(ctx context.Context, request UpdateBudgetPolic var budgetPolicy BudgetPolicy path := fmt.Sprintf("/api/2.1/accounts/%v/budget-policies/%v", a.client.ConfiguredAccountID(), request.PolicyId) queryParams := make(map[string]any) + if request.LimitConfig != nil { + queryParams["limit_config"] = request.LimitConfig + } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" @@ -118,7 +160,42 @@ func (a *budgetsImpl) Get(ctx context.Context, request GetBudgetConfigurationReq return &getBudgetConfigurationResponse, err } -func (a *budgetsImpl) List(ctx context.Context, request ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { +// Get all budgets. +// +// Gets all budgets associated with this account. 
+func (a *budgetsImpl) List(ctx context.Context, request ListBudgetConfigurationsRequest) listing.Iterator[BudgetConfiguration] { + + getNextPage := func(ctx context.Context, req ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListBudgetConfigurationsResponse) []BudgetConfiguration { + return resp.Budgets + } + getNextReq := func(resp *ListBudgetConfigurationsResponse) *ListBudgetConfigurationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get all budgets. +// +// Gets all budgets associated with this account. +func (a *budgetsImpl) ListAll(ctx context.Context, request ListBudgetConfigurationsRequest) ([]BudgetConfiguration, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[BudgetConfiguration](ctx, iterator) +} + +func (a *budgetsImpl) internalList(ctx context.Context, request ListBudgetConfigurationsRequest) (*ListBudgetConfigurationsResponse, error) { var listBudgetConfigurationsResponse ListBudgetConfigurationsResponse path := fmt.Sprintf("/api/2.1/accounts/%v/budgets", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -165,7 +242,38 @@ func (a *logDeliveryImpl) Get(ctx context.Context, request GetLogDeliveryRequest return &wrappedLogDeliveryConfiguration, err } -func (a *logDeliveryImpl) List(ctx context.Context, request ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { +// Get all log delivery configurations. +// +// Gets all Databricks log delivery configurations associated with an account +// specified by ID. 
+func (a *logDeliveryImpl) List(ctx context.Context, request ListLogDeliveryRequest) listing.Iterator[LogDeliveryConfiguration] { + + getNextPage := func(ctx context.Context, req ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *WrappedLogDeliveryConfigurations) []LogDeliveryConfiguration { + return resp.LogDeliveryConfigurations + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all log delivery configurations. +// +// Gets all Databricks log delivery configurations associated with an account +// specified by ID. +func (a *logDeliveryImpl) ListAll(ctx context.Context, request ListLogDeliveryRequest) ([]LogDeliveryConfiguration, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[LogDeliveryConfiguration](ctx, iterator) +} + +func (a *logDeliveryImpl) internalList(ctx context.Context, request ListLogDeliveryRequest) (*WrappedLogDeliveryConfigurations, error) { var wrappedLogDeliveryConfigurations WrappedLogDeliveryConfigurations path := fmt.Sprintf("/api/2.0/accounts/%v/log-delivery", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) diff --git a/service/billing/model.go b/service/billing/model.go index 1829f9c72..641a70fad 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -675,6 +675,11 @@ type GetLogDeliveryRequest struct { LogDeliveryConfigurationId string `json:"-" url:"-"` } +// The limit configuration of the policy. Limit configuration provide a budget +// policy level cost control by enforcing the limit. +type LimitConfig struct { +} + // Get all budgets type ListBudgetConfigurationsRequest struct { // A page token received from a previous get all budget configurations call. 
@@ -1104,6 +1109,9 @@ type UpdateBudgetConfigurationResponse struct { // Update a budget policy type UpdateBudgetPolicyRequest struct { + // DEPRECATED. This is redundant field as LimitConfig is part of the + // BudgetPolicy + LimitConfig *LimitConfig `json:"-" url:"limit_config,omitempty"` // Contains the BudgetPolicy details. Policy *BudgetPolicy `json:"policy,omitempty"` // The Id of the policy. This field is generated by Databricks and globally diff --git a/service/catalog/api.go b/service/catalog/api.go index 46d38dd3b..04b38a2cf 100755 --- a/service/catalog/api.go +++ b/service/catalog/api.go @@ -114,47 +114,12 @@ func (a *AccountMetastoreAssignmentsAPI) GetByWorkspaceId(ctx context.Context, w }) } -// Get all workspaces assigned to a metastore. -// -// Gets a list of all Databricks workspace IDs that have been assigned to given -// metastore. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountMetastoreAssignmentsAPI) List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) listing.Iterator[int64] { - - getNextPage := func(ctx context.Context, req ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountMetastoreAssignmentsImpl.List(ctx, req) - } - getItems := func(resp *ListAccountMetastoreAssignmentsResponse) []int64 { - return resp.WorkspaceIds - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get all workspaces assigned to a metastore. -// -// Gets a list of all Databricks workspace IDs that have been assigned to given -// metastore. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *AccountMetastoreAssignmentsAPI) ListAll(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) ([]int64, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[int64](ctx, iterator) -} - // Get all workspaces assigned to a metastore. // // Gets a list of all Databricks workspace IDs that have been assigned to given // metastore. func (a *AccountMetastoreAssignmentsAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountMetastoreAssignmentsResponse, error) { - return a.accountMetastoreAssignmentsImpl.List(ctx, ListAccountMetastoreAssignmentsRequest{ + return a.accountMetastoreAssignmentsImpl.internalList(ctx, ListAccountMetastoreAssignmentsRequest{ MetastoreId: metastoreId, }) } @@ -238,40 +203,6 @@ func (a *AccountMetastoresAPI) GetByMetastoreId(ctx context.Context, metastoreId }) } -// Get all metastores associated with an account. -// -// Gets all Unity Catalog metastores associated with an account specified by ID. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountMetastoresAPI) List(ctx context.Context) listing.Iterator[MetastoreInfo] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountMetastoresImpl.List(ctx) - } - getItems := func(resp *ListMetastoresResponse) []MetastoreInfo { - return resp.Metastores - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get all metastores associated with an account. -// -// Gets all Unity Catalog metastores associated with an account specified by ID. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *AccountMetastoresAPI) ListAll(ctx context.Context) ([]MetastoreInfo, error) { - iterator := a.List(ctx) - return listing.ToSlice[MetastoreInfo](ctx, iterator) -} - type AccountStorageCredentialsInterface interface { // Create a storage credential. @@ -378,47 +309,12 @@ func (a *AccountStorageCredentialsAPI) GetByMetastoreIdAndStorageCredentialName( }) } -// Get all storage credentials assigned to a metastore. -// -// Gets a list of all storage credentials that have been assigned to given -// metastore. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountStorageCredentialsAPI) List(ctx context.Context, request ListAccountStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { - - getNextPage := func(ctx context.Context, req ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountStorageCredentialsImpl.List(ctx, req) - } - getItems := func(resp *ListAccountStorageCredentialsResponse) []StorageCredentialInfo { - return resp.StorageCredentials - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get all storage credentials assigned to a metastore. -// -// Gets a list of all storage credentials that have been assigned to given -// metastore. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountStorageCredentialsAPI) ListAll(ctx context.Context, request ListAccountStorageCredentialsRequest) ([]StorageCredentialInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[StorageCredentialInfo](ctx, iterator) -} - // Get all storage credentials assigned to a metastore. // // Gets a list of all storage credentials that have been assigned to given // metastore. 
func (a *AccountStorageCredentialsAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListAccountStorageCredentialsResponse, error) { - return a.accountStorageCredentialsImpl.List(ctx, ListAccountStorageCredentialsRequest{ + return a.accountStorageCredentialsImpl.internalList(ctx, ListAccountStorageCredentialsRequest{ MetastoreId: metastoreId, }) } @@ -575,53 +471,6 @@ func (a *CatalogsAPI) GetByName(ctx context.Context, name string) (*CatalogInfo, }) } -// List catalogs. -// -// Gets an array of catalogs in the metastore. If the caller is the metastore -// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the -// caller (or for which the caller has the **USE_CATALOG** privilege) will be -// retrieved. There is no guarantee of a specific ordering of the elements in -// the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *CatalogsAPI) List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo] { - - getNextPage := func(ctx context.Context, req ListCatalogsRequest) (*ListCatalogsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.catalogsImpl.List(ctx, req) - } - getItems := func(resp *ListCatalogsResponse) []CatalogInfo { - return resp.Catalogs - } - getNextReq := func(resp *ListCatalogsResponse) *ListCatalogsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List catalogs. -// -// Gets an array of catalogs in the metastore. If the caller is the metastore -// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the -// caller (or for which the caller has the **USE_CATALOG** privilege) will be -// retrieved. There is no guarantee of a specific ordering of the elements in -// the array. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *CatalogsAPI) ListAll(ctx context.Context, request ListCatalogsRequest) ([]CatalogInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[CatalogInfo](ctx, iterator) -} - type ConnectionsInterface interface { // Create a connection. @@ -723,45 +572,6 @@ func (a *ConnectionsAPI) GetByName(ctx context.Context, name string) (*Connectio }) } -// List connections. -// -// List all connections. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConnectionsAPI) List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo] { - - getNextPage := func(ctx context.Context, req ListConnectionsRequest) (*ListConnectionsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.connectionsImpl.List(ctx, req) - } - getItems := func(resp *ListConnectionsResponse) []ConnectionInfo { - return resp.Connections - } - getNextReq := func(resp *ListConnectionsResponse) *ListConnectionsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List connections. -// -// List all connections. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConnectionsAPI) ListAll(ctx context.Context, request ListConnectionsRequest) ([]ConnectionInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ConnectionInfo](ctx, iterator) -} - // ConnectionInfoNameToFullNameMap calls [ConnectionsAPI.ListAll] and creates a map of results with [ConnectionInfo].Name as key and [ConnectionInfo].FullName as value. // // Returns an error if there's more than one [ConnectionInfo] with the same .Name. 
@@ -926,55 +736,6 @@ func (a *CredentialsAPI) GetCredentialByNameArg(ctx context.Context, nameArg str }) } -// List credentials. -// -// Gets an array of credentials (as __CredentialInfo__ objects). -// -// The array is limited to only the credentials that the caller has permission -// to access. If the caller is a metastore admin, retrieval of credentials is -// unrestricted. There is no guarantee of a specific ordering of the elements in -// the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *CredentialsAPI) ListCredentials(ctx context.Context, request ListCredentialsRequest) listing.Iterator[CredentialInfo] { - - getNextPage := func(ctx context.Context, req ListCredentialsRequest) (*ListCredentialsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.credentialsImpl.ListCredentials(ctx, req) - } - getItems := func(resp *ListCredentialsResponse) []CredentialInfo { - return resp.Credentials - } - getNextReq := func(resp *ListCredentialsResponse) *ListCredentialsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List credentials. -// -// Gets an array of credentials (as __CredentialInfo__ objects). -// -// The array is limited to only the credentials that the caller has permission -// to access. If the caller is a metastore admin, retrieval of credentials is -// unrestricted. There is no guarantee of a specific ordering of the elements in -// the array. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *CredentialsAPI) ListCredentialsAll(ctx context.Context, request ListCredentialsRequest) ([]CredentialInfo, error) { - iterator := a.ListCredentials(ctx, request) - return listing.ToSlice[CredentialInfo](ctx, iterator) -} - type ExternalLocationsInterface interface { // Create an external location. @@ -1086,53 +847,6 @@ func (a *ExternalLocationsAPI) GetByName(ctx context.Context, name string) (*Ext }) } -// List external locations. -// -// Gets an array of external locations (__ExternalLocationInfo__ objects) from -// the metastore. The caller must be a metastore admin, the owner of the -// external location, or a user that has some privilege on the external -// location. There is no guarantee of a specific ordering of the elements in the -// array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExternalLocationsAPI) List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] { - - getNextPage := func(ctx context.Context, req ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.externalLocationsImpl.List(ctx, req) - } - getItems := func(resp *ListExternalLocationsResponse) []ExternalLocationInfo { - return resp.ExternalLocations - } - getNextReq := func(resp *ListExternalLocationsResponse) *ListExternalLocationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List external locations. -// -// Gets an array of external locations (__ExternalLocationInfo__ objects) from -// the metastore. The caller must be a metastore admin, the owner of the -// external location, or a user that has some privilege on the external -// location. There is no guarantee of a specific ordering of the elements in the -// array. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *ExternalLocationsAPI) ListAll(ctx context.Context, request ListExternalLocationsRequest) ([]ExternalLocationInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ExternalLocationInfo](ctx, iterator) -} - type FunctionsInterface interface { // Create a function. @@ -1286,57 +1000,6 @@ func (a *FunctionsAPI) GetByName(ctx context.Context, name string) (*FunctionInf }) } -// List functions. -// -// List functions within the specified parent catalog and schema. If the user is -// a metastore admin, all functions are returned in the output list. Otherwise, -// the user must have the **USE_CATALOG** privilege on the catalog and the -// **USE_SCHEMA** privilege on the schema, and the output list contains only -// functions for which either the user has the **EXECUTE** privilege or the user -// is the owner. There is no guarantee of a specific ordering of the elements in -// the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *FunctionsAPI) List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] { - - getNextPage := func(ctx context.Context, req ListFunctionsRequest) (*ListFunctionsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.functionsImpl.List(ctx, req) - } - getItems := func(resp *ListFunctionsResponse) []FunctionInfo { - return resp.Functions - } - getNextReq := func(resp *ListFunctionsResponse) *ListFunctionsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List functions. -// -// List functions within the specified parent catalog and schema. If the user is -// a metastore admin, all functions are returned in the output list. 
Otherwise, -// the user must have the **USE_CATALOG** privilege on the catalog and the -// **USE_SCHEMA** privilege on the schema, and the output list contains only -// functions for which either the user has the **EXECUTE** privilege or the user -// is the owner. There is no guarantee of a specific ordering of the elements in -// the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *FunctionsAPI) ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[FunctionInfo](ctx, iterator) -} - // FunctionInfoNameToFullNameMap calls [FunctionsAPI.ListAll] and creates a map of results with [FunctionInfo].Name as key and [FunctionInfo].FullName as value. // // Returns an error if there's more than one [FunctionInfo] with the same .Name. @@ -1592,44 +1255,6 @@ func (a *MetastoresAPI) GetById(ctx context.Context, id string) (*MetastoreInfo, }) } -// List metastores. -// -// Gets an array of the available metastores (as __MetastoreInfo__ objects). The -// caller must be an admin to retrieve this info. There is no guarantee of a -// specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *MetastoresAPI) List(ctx context.Context) listing.Iterator[MetastoreInfo] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.metastoresImpl.List(ctx) - } - getItems := func(resp *ListMetastoresResponse) []MetastoreInfo { - return resp.Metastores - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List metastores. -// -// Gets an array of the available metastores (as __MetastoreInfo__ objects). The -// caller must be an admin to retrieve this info. 
There is no guarantee of a -// specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *MetastoresAPI) ListAll(ctx context.Context) ([]MetastoreInfo, error) { - iterator := a.List(ctx) - return listing.ToSlice[MetastoreInfo](ctx, iterator) -} - // MetastoreInfoNameToMetastoreIdMap calls [MetastoresAPI.ListAll] and creates a map of results with [MetastoreInfo].Name as key and [MetastoreInfo].MetastoreId as value. // // Returns an error if there's more than one [MetastoreInfo] with the same .Name. @@ -1889,69 +1514,6 @@ func (a *ModelVersionsAPI) GetByAliasByFullNameAndAlias(ctx context.Context, ful }) } -// List Model Versions. -// -// List model versions. You can list model versions under a particular schema, -// or list all model versions in the current metastore. -// -// The returned models are filtered based on the privileges of the calling user. -// For example, the metastore admin is able to list all the model versions. A -// regular user needs to be the owner or have the **EXECUTE** privilege on the -// parent registered model to recieve the model versions in the response. For -// the latter case, the caller must also be the owner or have the -// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** -// privilege on the parent schema. -// -// There is no guarantee of a specific ordering of the elements in the response. -// The elements in the response will not contain any aliases or tags. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ModelVersionsAPI) List(ctx context.Context, request ListModelVersionsRequest) listing.Iterator[ModelVersionInfo] { - - getNextPage := func(ctx context.Context, req ListModelVersionsRequest) (*ListModelVersionsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.modelVersionsImpl.List(ctx, req) - } - getItems := func(resp *ListModelVersionsResponse) []ModelVersionInfo { - return resp.ModelVersions - } - getNextReq := func(resp *ListModelVersionsResponse) *ListModelVersionsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List Model Versions. -// -// List model versions. You can list model versions under a particular schema, -// or list all model versions in the current metastore. -// -// The returned models are filtered based on the privileges of the calling user. -// For example, the metastore admin is able to list all the model versions. A -// regular user needs to be the owner or have the **EXECUTE** privilege on the -// parent registered model to recieve the model versions in the response. For -// the latter case, the caller must also be the owner or have the -// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** -// privilege on the parent schema. -// -// There is no guarantee of a specific ordering of the elements in the response. -// The elements in the response will not contain any aliases or tags. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelVersionsAPI) ListAll(ctx context.Context, request ListModelVersionsRequest) ([]ModelVersionInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ModelVersionInfo](ctx, iterator) -} - // List Model Versions. // // List model versions. 
You can list model versions under a particular schema, @@ -1968,7 +1530,7 @@ func (a *ModelVersionsAPI) ListAll(ctx context.Context, request ListModelVersion // There is no guarantee of a specific ordering of the elements in the response. // The elements in the response will not contain any aliases or tags. func (a *ModelVersionsAPI) ListByFullName(ctx context.Context, fullName string) (*ListModelVersionsResponse, error) { - return a.modelVersionsImpl.List(ctx, ListModelVersionsRequest{ + return a.modelVersionsImpl.internalList(ctx, ListModelVersionsRequest{ FullName: fullName, }) } @@ -2697,67 +2259,6 @@ func (a *RegisteredModelsAPI) GetByFullName(ctx context.Context, fullName string }) } -// List Registered Models. -// -// List registered models. You can list registered models under a particular -// schema, or list all registered models in the current metastore. -// -// The returned models are filtered based on the privileges of the calling user. -// For example, the metastore admin is able to list all the registered models. A -// regular user needs to be the owner or have the **EXECUTE** privilege on the -// registered model to recieve the registered models in the response. For the -// latter case, the caller must also be the owner or have the **USE_CATALOG** -// privilege on the parent catalog and the **USE_SCHEMA** privilege on the -// parent schema. -// -// There is no guarantee of a specific ordering of the elements in the response. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *RegisteredModelsAPI) List(ctx context.Context, request ListRegisteredModelsRequest) listing.Iterator[RegisteredModelInfo] { - - getNextPage := func(ctx context.Context, req ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.registeredModelsImpl.List(ctx, req) - } - getItems := func(resp *ListRegisteredModelsResponse) []RegisteredModelInfo { - return resp.RegisteredModels - } - getNextReq := func(resp *ListRegisteredModelsResponse) *ListRegisteredModelsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List Registered Models. -// -// List registered models. You can list registered models under a particular -// schema, or list all registered models in the current metastore. -// -// The returned models are filtered based on the privileges of the calling user. -// For example, the metastore admin is able to list all the registered models. A -// regular user needs to be the owner or have the **EXECUTE** privilege on the -// registered model to recieve the registered models in the response. For the -// latter case, the caller must also be the owner or have the **USE_CATALOG** -// privilege on the parent catalog and the **USE_SCHEMA** privilege on the -// parent schema. -// -// There is no guarantee of a specific ordering of the elements in the response. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *RegisteredModelsAPI) ListAll(ctx context.Context, request ListRegisteredModelsRequest) ([]RegisteredModelInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[RegisteredModelInfo](ctx, iterator) -} - // RegisteredModelInfoNameToFullNameMap calls [RegisteredModelsAPI.ListAll] and creates a map of results with [RegisteredModelInfo].Name as key and [RegisteredModelInfo].FullName as value. // // Returns an error if there's more than one [RegisteredModelInfo] with the same .Name. @@ -2882,49 +2383,6 @@ func (a *ResourceQuotasAPI) GetQuotaByParentSecurableTypeAndParentFullNameAndQuo }) } -// List all resource quotas under a metastore. -// -// ListQuotas returns all quota values under the metastore. There are no SLAs on -// the freshness of the counts returned. This API does not trigger a refresh of -// quota counts. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ResourceQuotasAPI) ListQuotas(ctx context.Context, request ListQuotasRequest) listing.Iterator[QuotaInfo] { - - getNextPage := func(ctx context.Context, req ListQuotasRequest) (*ListQuotasResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.resourceQuotasImpl.ListQuotas(ctx, req) - } - getItems := func(resp *ListQuotasResponse) []QuotaInfo { - return resp.Quotas - } - getNextReq := func(resp *ListQuotasResponse) *ListQuotasRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List all resource quotas under a metastore. -// -// ListQuotas returns all quota values under the metastore. There are no SLAs on -// the freshness of the counts returned. This API does not trigger a refresh of -// quota counts. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ResourceQuotasAPI) ListQuotasAll(ctx context.Context, request ListQuotasRequest) ([]QuotaInfo, error) { - iterator := a.ListQuotas(ctx, request) - return listing.ToSlice[QuotaInfo](ctx, iterator) -} - type SchemasInterface interface { // Create a schema. @@ -3048,53 +2506,6 @@ func (a *SchemasAPI) GetByFullName(ctx context.Context, fullName string) (*Schem }) } -// List schemas. -// -// Gets an array of schemas for a catalog in the metastore. If the caller is the -// metastore admin or the owner of the parent catalog, all schemas for the -// catalog will be retrieved. Otherwise, only schemas owned by the caller (or -// for which the caller has the **USE_SCHEMA** privilege) will be retrieved. -// There is no guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SchemasAPI) List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] { - - getNextPage := func(ctx context.Context, req ListSchemasRequest) (*ListSchemasResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.schemasImpl.List(ctx, req) - } - getItems := func(resp *ListSchemasResponse) []SchemaInfo { - return resp.Schemas - } - getNextReq := func(resp *ListSchemasResponse) *ListSchemasRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List schemas. -// -// Gets an array of schemas for a catalog in the metastore. If the caller is the -// metastore admin or the owner of the parent catalog, all schemas for the -// catalog will be retrieved. Otherwise, only schemas owned by the caller (or -// for which the caller has the **USE_SCHEMA** privilege) will be retrieved. -// There is no guarantee of a specific ordering of the elements in the array. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *SchemasAPI) ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[SchemaInfo](ctx, iterator) -} - // SchemaInfoNameToFullNameMap calls [SchemasAPI.ListAll] and creates a map of results with [SchemaInfo].Name as key and [SchemaInfo].FullName as value. // // Returns an error if there's more than one [SchemaInfo] with the same .Name. @@ -3280,53 +2691,6 @@ func (a *StorageCredentialsAPI) GetByName(ctx context.Context, name string) (*St }) } -// List credentials. -// -// Gets an array of storage credentials (as __StorageCredentialInfo__ objects). -// The array is limited to only those storage credentials the caller has -// permission to access. If the caller is a metastore admin, retrieval of -// credentials is unrestricted. There is no guarantee of a specific ordering of -// the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *StorageCredentialsAPI) List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { - - getNextPage := func(ctx context.Context, req ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.storageCredentialsImpl.List(ctx, req) - } - getItems := func(resp *ListStorageCredentialsResponse) []StorageCredentialInfo { - return resp.StorageCredentials - } - getNextReq := func(resp *ListStorageCredentialsResponse) *ListStorageCredentialsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List credentials. -// -// Gets an array of storage credentials (as __StorageCredentialInfo__ objects). 
-// The array is limited to only those storage credentials the caller has -// permission to access. If the caller is a metastore admin, retrieval of -// credentials is unrestricted. There is no guarantee of a specific ordering of -// the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *StorageCredentialsAPI) ListAll(ctx context.Context, request ListStorageCredentialsRequest) ([]StorageCredentialInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[StorageCredentialInfo](ctx, iterator) -} - // StorageCredentialInfoNameToIdMap calls [StorageCredentialsAPI.ListAll] and creates a map of results with [StorageCredentialInfo].Name as key and [StorageCredentialInfo].Id as value. // // Returns an error if there's more than one [StorageCredentialInfo] with the same .Name. @@ -3421,53 +2785,12 @@ func (a *SystemSchemasAPI) DisableByMetastoreIdAndSchemaName(ctx context.Context }) } -// List system schemas. -// -// Gets an array of system schemas for a metastore. The caller must be an -// account admin or a metastore admin. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SystemSchemasAPI) List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] { - - getNextPage := func(ctx context.Context, req ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.systemSchemasImpl.List(ctx, req) - } - getItems := func(resp *ListSystemSchemasResponse) []SystemSchemaInfo { - return resp.Schemas - } - getNextReq := func(resp *ListSystemSchemasResponse) *ListSystemSchemasRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List system schemas. -// -// Gets an array of system schemas for a metastore. 
The caller must be an -// account admin or a metastore admin. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SystemSchemasAPI) ListAll(ctx context.Context, request ListSystemSchemasRequest) ([]SystemSchemaInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[SystemSchemaInfo](ctx, iterator) -} - // List system schemas. // // Gets an array of system schemas for a metastore. The caller must be an // account admin or a metastore admin. func (a *SystemSchemasAPI) ListByMetastoreId(ctx context.Context, metastoreId string) (*ListSystemSchemasResponse, error) { - return a.systemSchemasImpl.List(ctx, ListSystemSchemasRequest{ + return a.systemSchemasImpl.internalList(ctx, ListSystemSchemasRequest{ MetastoreId: metastoreId, }) } @@ -3775,55 +3098,6 @@ func (a *TablesAPI) GetByFullName(ctx context.Context, fullName string) (*TableI }) } -// List tables. -// -// Gets an array of all tables for the current metastore under the parent -// catalog and schema. The caller must be a metastore admin or an owner of (or -// have the **SELECT** privilege on) the table. For the latter case, the caller -// must also be the owner or have the **USE_CATALOG** privilege on the parent -// catalog and the **USE_SCHEMA** privilege on the parent schema. There is no -// guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *TablesAPI) List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] { - - getNextPage := func(ctx context.Context, req ListTablesRequest) (*ListTablesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.tablesImpl.List(ctx, req) - } - getItems := func(resp *ListTablesResponse) []TableInfo { - return resp.Tables - } - getNextReq := func(resp *ListTablesResponse) *ListTablesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List tables. -// -// Gets an array of all tables for the current metastore under the parent -// catalog and schema. The caller must be a metastore admin or an owner of (or -// have the **SELECT** privilege on) the table. For the latter case, the caller -// must also be the owner or have the **USE_CATALOG** privilege on the parent -// catalog and the **USE_SCHEMA** privilege on the parent schema. There is no -// guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *TablesAPI) ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[TableInfo](ctx, iterator) -} - // TableInfoNameToTableIdMap calls [TablesAPI.ListAll] and creates a map of results with [TableInfo].Name as key and [TableInfo].TableId as value. // // Returns an error if there's more than one [TableInfo] with the same .Name. @@ -3877,65 +3151,6 @@ func (a *TablesAPI) GetByName(ctx context.Context, name string) (*TableInfo, err return &alternatives[0], nil } -// List table summaries. -// -// Gets an array of summaries for tables for a schema and catalog within the -// metastore. 
The table summaries returned are either: -// -// * summaries for tables (within the current metastore and parent catalog and -// schema), when the user is a metastore admin, or: * summaries for tables and -// schemas (within the current metastore and parent catalog) for which the user -// has ownership or the **SELECT** privilege on the table and ownership or -// **USE_SCHEMA** privilege on the schema, provided that the user also has -// ownership or the **USE_CATALOG** privilege on the parent catalog. -// -// There is no guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *TablesAPI) ListSummaries(ctx context.Context, request ListSummariesRequest) listing.Iterator[TableSummary] { - - getNextPage := func(ctx context.Context, req ListSummariesRequest) (*ListTableSummariesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.tablesImpl.ListSummaries(ctx, req) - } - getItems := func(resp *ListTableSummariesResponse) []TableSummary { - return resp.Tables - } - getNextReq := func(resp *ListTableSummariesResponse) *ListSummariesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List table summaries. -// -// Gets an array of summaries for tables for a schema and catalog within the -// metastore. 
The table summaries returned are either: -// -// * summaries for tables (within the current metastore and parent catalog and -// schema), when the user is a metastore admin, or: * summaries for tables and -// schemas (within the current metastore and parent catalog) for which the user -// has ownership or the **SELECT** privilege on the table and ownership or -// **USE_SCHEMA** privilege on the schema, provided that the user also has -// ownership or the **USE_CATALOG** privilege on the parent catalog. -// -// There is no guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *TablesAPI) ListSummariesAll(ctx context.Context, request ListSummariesRequest) ([]TableSummary, error) { - iterator := a.ListSummaries(ctx, request) - return listing.ToSlice[TableSummary](ctx, iterator) -} - type TemporaryTableCredentialsInterface interface { // Generate a temporary table credential. @@ -4139,65 +3354,6 @@ func (a *VolumesAPI) DeleteByName(ctx context.Context, name string) error { }) } -// List Volumes. -// -// Gets an array of volumes for the current metastore under the parent catalog -// and schema. -// -// The returned volumes are filtered based on the privileges of the calling -// user. For example, the metastore admin is able to list all the volumes. A -// regular user needs to be the owner or have the **READ VOLUME** privilege on -// the volume to recieve the volumes in the response. For the latter case, the -// caller must also be the owner or have the **USE_CATALOG** privilege on the -// parent catalog and the **USE_SCHEMA** privilege on the parent schema. -// -// There is no guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *VolumesAPI) List(ctx context.Context, request ListVolumesRequest) listing.Iterator[VolumeInfo] { - - getNextPage := func(ctx context.Context, req ListVolumesRequest) (*ListVolumesResponseContent, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.volumesImpl.List(ctx, req) - } - getItems := func(resp *ListVolumesResponseContent) []VolumeInfo { - return resp.Volumes - } - getNextReq := func(resp *ListVolumesResponseContent) *ListVolumesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List Volumes. -// -// Gets an array of volumes for the current metastore under the parent catalog -// and schema. -// -// The returned volumes are filtered based on the privileges of the calling -// user. For example, the metastore admin is able to list all the volumes. A -// regular user needs to be the owner or have the **READ VOLUME** privilege on -// the volume to recieve the volumes in the response. For the latter case, the -// caller must also be the owner or have the **USE_CATALOG** privilege on the -// parent catalog and the **USE_SCHEMA** privilege on the parent schema. -// -// There is no guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *VolumesAPI) ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[VolumeInfo](ctx, iterator) -} - // VolumeInfoNameToVolumeIdMap calls [VolumesAPI.ListAll] and creates a map of results with [VolumeInfo].Name as key and [VolumeInfo].VolumeId as value. // // Returns an error if there's more than one [VolumeInfo] with the same .Name. 
@@ -4355,53 +3511,12 @@ func (a *WorkspaceBindingsAPI) GetByName(ctx context.Context, name string) (*Cur }) } -// Get securable workspace bindings. -// -// Gets workspace bindings of the securable. The caller must be a metastore -// admin or an owner of the securable. -// -// This method is generated by Databricks SDK Code Generator. -func (a *WorkspaceBindingsAPI) GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] { - - getNextPage := func(ctx context.Context, req GetBindingsRequest) (*WorkspaceBindingsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.workspaceBindingsImpl.GetBindings(ctx, req) - } - getItems := func(resp *WorkspaceBindingsResponse) []WorkspaceBinding { - return resp.Bindings - } - getNextReq := func(resp *WorkspaceBindingsResponse) *GetBindingsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get securable workspace bindings. -// -// Gets workspace bindings of the securable. The caller must be a metastore -// admin or an owner of the securable. -// -// This method is generated by Databricks SDK Code Generator. -func (a *WorkspaceBindingsAPI) GetBindingsAll(ctx context.Context, request GetBindingsRequest) ([]WorkspaceBinding, error) { - iterator := a.GetBindings(ctx, request) - return listing.ToSlice[WorkspaceBinding](ctx, iterator) -} - // Get securable workspace bindings. // // Gets workspace bindings of the securable. The caller must be a metastore // admin or an owner of the securable. 
func (a *WorkspaceBindingsAPI) GetBindingsBySecurableTypeAndSecurableName(ctx context.Context, securableType GetBindingsSecurableType, securableName string) (*WorkspaceBindingsResponse, error) { - return a.workspaceBindingsImpl.GetBindings(ctx, GetBindingsRequest{ + return a.workspaceBindingsImpl.internalGetBindings(ctx, GetBindingsRequest{ SecurableType: securableType, SecurableName: securableName, }) diff --git a/service/catalog/impl.go b/service/catalog/impl.go index 91e940943..c94799864 100755 --- a/service/catalog/impl.go +++ b/service/catalog/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just AccountMetastoreAssignments API methods @@ -46,7 +48,38 @@ func (a *accountMetastoreAssignmentsImpl) Get(ctx context.Context, request GetAc return &accountsMetastoreAssignment, err } -func (a *accountMetastoreAssignmentsImpl) List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { +// Get all workspaces assigned to a metastore. +// +// Gets a list of all Databricks workspace IDs that have been assigned to given +// metastore. +func (a *accountMetastoreAssignmentsImpl) List(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) listing.Iterator[int64] { + + getNextPage := func(ctx context.Context, req ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAccountMetastoreAssignmentsResponse) []int64 { + return resp.WorkspaceIds + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all workspaces assigned to a metastore. 
+// +// Gets a list of all Databricks workspace IDs that have been assigned to given +// metastore. +func (a *accountMetastoreAssignmentsImpl) ListAll(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) ([]int64, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[int64](ctx, iterator) +} + +func (a *accountMetastoreAssignmentsImpl) internalList(ctx context.Context, request ListAccountMetastoreAssignmentsRequest) (*ListAccountMetastoreAssignmentsResponse, error) { var listAccountMetastoreAssignmentsResponse ListAccountMetastoreAssignmentsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/workspaces", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams := make(map[string]any) @@ -103,7 +136,37 @@ func (a *accountMetastoresImpl) Get(ctx context.Context, request GetAccountMetas return &accountsMetastoreInfo, err } -func (a *accountMetastoresImpl) List(ctx context.Context) (*ListMetastoresResponse, error) { +// Get all metastores associated with an account. +// +// Gets all Unity Catalog metastores associated with an account specified by ID. +func (a *accountMetastoresImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListMetastoresResponse) []MetastoreInfo { + return resp.Metastores + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all metastores associated with an account. +// +// Gets all Unity Catalog metastores associated with an account specified by ID. 
+func (a *accountMetastoresImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[MetastoreInfo](ctx, iterator) +} + +func (a *accountMetastoresImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { var listMetastoresResponse ListMetastoresResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores", a.client.ConfiguredAccountID()) @@ -160,7 +223,38 @@ func (a *accountStorageCredentialsImpl) Get(ctx context.Context, request GetAcco return &accountsStorageCredentialInfo, err } -func (a *accountStorageCredentialsImpl) List(ctx context.Context, request ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { +// Get all storage credentials assigned to a metastore. +// +// Gets a list of all storage credentials that have been assigned to given +// metastore. +func (a *accountStorageCredentialsImpl) List(ctx context.Context, request ListAccountStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { + + getNextPage := func(ctx context.Context, req ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAccountStorageCredentialsResponse) []StorageCredentialInfo { + return resp.StorageCredentials + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all storage credentials assigned to a metastore. +// +// Gets a list of all storage credentials that have been assigned to given +// metastore. 
+func (a *accountStorageCredentialsImpl) ListAll(ctx context.Context, request ListAccountStorageCredentialsRequest) ([]StorageCredentialInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[StorageCredentialInfo](ctx, iterator) +} + +func (a *accountStorageCredentialsImpl) internalList(ctx context.Context, request ListAccountStorageCredentialsRequest) (*ListAccountStorageCredentialsResponse, error) { var listAccountStorageCredentialsResponse ListAccountStorageCredentialsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/metastores/%v/storage-credentials", a.client.ConfiguredAccountID(), request.MetastoreId) queryParams := make(map[string]any) @@ -243,7 +337,52 @@ func (a *catalogsImpl) Get(ctx context.Context, request GetCatalogRequest) (*Cat return &catalogInfo, err } -func (a *catalogsImpl) List(ctx context.Context, request ListCatalogsRequest) (*ListCatalogsResponse, error) { +// List catalogs. +// +// Gets an array of catalogs in the metastore. If the caller is the metastore +// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the +// caller (or for which the caller has the **USE_CATALOG** privilege) will be +// retrieved. There is no guarantee of a specific ordering of the elements in +// the array. 
+func (a *catalogsImpl) List(ctx context.Context, request ListCatalogsRequest) listing.Iterator[CatalogInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListCatalogsRequest) (*ListCatalogsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCatalogsResponse) []CatalogInfo { + return resp.Catalogs + } + getNextReq := func(resp *ListCatalogsResponse) *ListCatalogsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List catalogs. +// +// Gets an array of catalogs in the metastore. If the caller is the metastore +// admin, all catalogs will be retrieved. Otherwise, only catalogs owned by the +// caller (or for which the caller has the **USE_CATALOG** privilege) will be +// retrieved. There is no guarantee of a specific ordering of the elements in +// the array. +func (a *catalogsImpl) ListAll(ctx context.Context, request ListCatalogsRequest) ([]CatalogInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CatalogInfo](ctx, iterator) +} + +func (a *catalogsImpl) internalList(ctx context.Context, request ListCatalogsRequest) (*ListCatalogsResponse, error) { var listCatalogsResponse ListCatalogsResponse path := "/api/2.1/unity-catalog/catalogs" queryParams := make(map[string]any) @@ -300,7 +439,44 @@ func (a *connectionsImpl) Get(ctx context.Context, request GetConnectionRequest) return &connectionInfo, err } -func (a *connectionsImpl) List(ctx context.Context, request ListConnectionsRequest) (*ListConnectionsResponse, error) { +// List connections. +// +// List all connections. 
+func (a *connectionsImpl) List(ctx context.Context, request ListConnectionsRequest) listing.Iterator[ConnectionInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListConnectionsRequest) (*ListConnectionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListConnectionsResponse) []ConnectionInfo { + return resp.Connections + } + getNextReq := func(resp *ListConnectionsResponse) *ListConnectionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List connections. +// +// List all connections. +func (a *connectionsImpl) ListAll(ctx context.Context, request ListConnectionsRequest) ([]ConnectionInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ConnectionInfo](ctx, iterator) +} + +func (a *connectionsImpl) internalList(ctx context.Context, request ListConnectionsRequest) (*ListConnectionsResponse, error) { var listConnectionsResponse ListConnectionsResponse path := "/api/2.1/unity-catalog/connections" queryParams := make(map[string]any) @@ -368,7 +544,52 @@ func (a *credentialsImpl) GetCredential(ctx context.Context, request GetCredenti return &credentialInfo, err } -func (a *credentialsImpl) ListCredentials(ctx context.Context, request ListCredentialsRequest) (*ListCredentialsResponse, error) { +// List credentials. +// +// Gets an array of credentials (as __CredentialInfo__ objects). +// +// The array is limited to only the credentials that the caller has permission +// to access. If the caller is a metastore admin, retrieval of credentials is +// unrestricted. There is no guarantee of a specific ordering of the elements in +// the array. 
+func (a *credentialsImpl) ListCredentials(ctx context.Context, request ListCredentialsRequest) listing.Iterator[CredentialInfo] { + + getNextPage := func(ctx context.Context, req ListCredentialsRequest) (*ListCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListCredentials(ctx, req) + } + getItems := func(resp *ListCredentialsResponse) []CredentialInfo { + return resp.Credentials + } + getNextReq := func(resp *ListCredentialsResponse) *ListCredentialsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List credentials. +// +// Gets an array of credentials (as __CredentialInfo__ objects). +// +// The array is limited to only the credentials that the caller has permission +// to access. If the caller is a metastore admin, retrieval of credentials is +// unrestricted. There is no guarantee of a specific ordering of the elements in +// the array. +func (a *credentialsImpl) ListCredentialsAll(ctx context.Context, request ListCredentialsRequest) ([]CredentialInfo, error) { + iterator := a.ListCredentials(ctx, request) + return listing.ToSlice[CredentialInfo](ctx, iterator) +} + +func (a *credentialsImpl) internalListCredentials(ctx context.Context, request ListCredentialsRequest) (*ListCredentialsResponse, error) { var listCredentialsResponse ListCredentialsResponse path := "/api/2.1/unity-catalog/credentials" queryParams := make(map[string]any) @@ -436,7 +657,52 @@ func (a *externalLocationsImpl) Get(ctx context.Context, request GetExternalLoca return &externalLocationInfo, err } -func (a *externalLocationsImpl) List(ctx context.Context, request ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { +// List external locations. 
+// +// Gets an array of external locations (__ExternalLocationInfo__ objects) from +// the metastore. The caller must be a metastore admin, the owner of the +// external location, or a user that has some privilege on the external +// location. There is no guarantee of a specific ordering of the elements in the +// array. +func (a *externalLocationsImpl) List(ctx context.Context, request ListExternalLocationsRequest) listing.Iterator[ExternalLocationInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListExternalLocationsResponse) []ExternalLocationInfo { + return resp.ExternalLocations + } + getNextReq := func(resp *ListExternalLocationsResponse) *ListExternalLocationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List external locations. +// +// Gets an array of external locations (__ExternalLocationInfo__ objects) from +// the metastore. The caller must be a metastore admin, the owner of the +// external location, or a user that has some privilege on the external +// location. There is no guarantee of a specific ordering of the elements in the +// array. 
+func (a *externalLocationsImpl) ListAll(ctx context.Context, request ListExternalLocationsRequest) ([]ExternalLocationInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ExternalLocationInfo](ctx, iterator) +} + +func (a *externalLocationsImpl) internalList(ctx context.Context, request ListExternalLocationsRequest) (*ListExternalLocationsResponse, error) { var listExternalLocationsResponse ListExternalLocationsResponse path := "/api/2.1/unity-catalog/external-locations" queryParams := make(map[string]any) @@ -493,7 +759,54 @@ func (a *functionsImpl) Get(ctx context.Context, request GetFunctionRequest) (*F return &functionInfo, err } -func (a *functionsImpl) List(ctx context.Context, request ListFunctionsRequest) (*ListFunctionsResponse, error) { +// List functions. +// +// List functions within the specified parent catalog and schema. If the user is +// a metastore admin, all functions are returned in the output list. Otherwise, +// the user must have the **USE_CATALOG** privilege on the catalog and the +// **USE_SCHEMA** privilege on the schema, and the output list contains only +// functions for which either the user has the **EXECUTE** privilege or the user +// is the owner. There is no guarantee of a specific ordering of the elements in +// the array. 
+func (a *functionsImpl) List(ctx context.Context, request ListFunctionsRequest) listing.Iterator[FunctionInfo] { + + getNextPage := func(ctx context.Context, req ListFunctionsRequest) (*ListFunctionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFunctionsResponse) []FunctionInfo { + return resp.Functions + } + getNextReq := func(resp *ListFunctionsResponse) *ListFunctionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List functions. +// +// List functions within the specified parent catalog and schema. If the user is +// a metastore admin, all functions are returned in the output list. Otherwise, +// the user must have the **USE_CATALOG** privilege on the catalog and the +// **USE_SCHEMA** privilege on the schema, and the output list contains only +// functions for which either the user has the **EXECUTE** privilege or the user +// is the owner. There is no guarantee of a specific ordering of the elements in +// the array. +func (a *functionsImpl) ListAll(ctx context.Context, request ListFunctionsRequest) ([]FunctionInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FunctionInfo](ctx, iterator) +} + +func (a *functionsImpl) internalList(ctx context.Context, request ListFunctionsRequest) (*ListFunctionsResponse, error) { var listFunctionsResponse ListFunctionsResponse path := "/api/2.1/unity-catalog/functions" queryParams := make(map[string]any) @@ -607,7 +920,41 @@ func (a *metastoresImpl) Get(ctx context.Context, request GetMetastoreRequest) ( return &metastoreInfo, err } -func (a *metastoresImpl) List(ctx context.Context) (*ListMetastoresResponse, error) { +// List metastores. 
+// +// Gets an array of the available metastores (as __MetastoreInfo__ objects). The +// caller must be an admin to retrieve this info. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *metastoresImpl) List(ctx context.Context) listing.Iterator[MetastoreInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListMetastoresResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListMetastoresResponse) []MetastoreInfo { + return resp.Metastores + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List metastores. +// +// Gets an array of the available metastores (as __MetastoreInfo__ objects). The +// caller must be an admin to retrieve this info. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *metastoresImpl) ListAll(ctx context.Context) ([]MetastoreInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[MetastoreInfo](ctx, iterator) +} + +func (a *metastoresImpl) internalList(ctx context.Context) (*ListMetastoresResponse, error) { var listMetastoresResponse ListMetastoresResponse path := "/api/2.1/unity-catalog/metastores" @@ -693,7 +1040,66 @@ func (a *modelVersionsImpl) GetByAlias(ctx context.Context, request GetByAliasRe return &modelVersionInfo, err } -func (a *modelVersionsImpl) List(ctx context.Context, request ListModelVersionsRequest) (*ListModelVersionsResponse, error) { +// List Model Versions. +// +// List model versions. You can list model versions under a particular schema, +// or list all model versions in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the model versions. 
A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// parent registered model to recieve the model versions in the response. For +// the latter case, the caller must also be the owner or have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the response. +// The elements in the response will not contain any aliases or tags. +func (a *modelVersionsImpl) List(ctx context.Context, request ListModelVersionsRequest) listing.Iterator[ModelVersionInfo] { + + getNextPage := func(ctx context.Context, req ListModelVersionsRequest) (*ListModelVersionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListModelVersionsResponse) []ModelVersionInfo { + return resp.ModelVersions + } + getNextReq := func(resp *ListModelVersionsResponse) *ListModelVersionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List Model Versions. +// +// List model versions. You can list model versions under a particular schema, +// or list all model versions in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the model versions. A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// parent registered model to recieve the model versions in the response. For +// the latter case, the caller must also be the owner or have the +// **USE_CATALOG** privilege on the parent catalog and the **USE_SCHEMA** +// privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the response. 
+// The elements in the response will not contain any aliases or tags. +func (a *modelVersionsImpl) ListAll(ctx context.Context, request ListModelVersionsRequest) ([]ModelVersionInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ModelVersionInfo](ctx, iterator) +} + +func (a *modelVersionsImpl) internalList(ctx context.Context, request ListModelVersionsRequest) (*ListModelVersionsResponse, error) { var listModelVersionsResponse ListModelVersionsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/models/%v/versions", request.FullName) queryParams := make(map[string]any) @@ -890,7 +1296,64 @@ func (a *registeredModelsImpl) Get(ctx context.Context, request GetRegisteredMod return ®isteredModelInfo, err } -func (a *registeredModelsImpl) List(ctx context.Context, request ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { +// List Registered Models. +// +// List registered models. You can list registered models under a particular +// schema, or list all registered models in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the registered models. A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// registered model to recieve the registered models in the response. For the +// latter case, the caller must also be the owner or have the **USE_CATALOG** +// privilege on the parent catalog and the **USE_SCHEMA** privilege on the +// parent schema. +// +// There is no guarantee of a specific ordering of the elements in the response. 
+func (a *registeredModelsImpl) List(ctx context.Context, request ListRegisteredModelsRequest) listing.Iterator[RegisteredModelInfo] { + + getNextPage := func(ctx context.Context, req ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListRegisteredModelsResponse) []RegisteredModelInfo { + return resp.RegisteredModels + } + getNextReq := func(resp *ListRegisteredModelsResponse) *ListRegisteredModelsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List Registered Models. +// +// List registered models. You can list registered models under a particular +// schema, or list all registered models in the current metastore. +// +// The returned models are filtered based on the privileges of the calling user. +// For example, the metastore admin is able to list all the registered models. A +// regular user needs to be the owner or have the **EXECUTE** privilege on the +// registered model to recieve the registered models in the response. For the +// latter case, the caller must also be the owner or have the **USE_CATALOG** +// privilege on the parent catalog and the **USE_SCHEMA** privilege on the +// parent schema. +// +// There is no guarantee of a specific ordering of the elements in the response. 
+func (a *registeredModelsImpl) ListAll(ctx context.Context, request ListRegisteredModelsRequest) ([]RegisteredModelInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[RegisteredModelInfo](ctx, iterator) +} + +func (a *registeredModelsImpl) internalList(ctx context.Context, request ListRegisteredModelsRequest) (*ListRegisteredModelsResponse, error) { var listRegisteredModelsResponse ListRegisteredModelsResponse path := "/api/2.1/unity-catalog/models" queryParams := make(map[string]any) @@ -937,7 +1400,46 @@ func (a *resourceQuotasImpl) GetQuota(ctx context.Context, request GetQuotaReque return &getQuotaResponse, err } -func (a *resourceQuotasImpl) ListQuotas(ctx context.Context, request ListQuotasRequest) (*ListQuotasResponse, error) { +// List all resource quotas under a metastore. +// +// ListQuotas returns all quota values under the metastore. There are no SLAs on +// the freshness of the counts returned. This API does not trigger a refresh of +// quota counts. +func (a *resourceQuotasImpl) ListQuotas(ctx context.Context, request ListQuotasRequest) listing.Iterator[QuotaInfo] { + + getNextPage := func(ctx context.Context, req ListQuotasRequest) (*ListQuotasResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListQuotas(ctx, req) + } + getItems := func(resp *ListQuotasResponse) []QuotaInfo { + return resp.Quotas + } + getNextReq := func(resp *ListQuotasResponse) *ListQuotasRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all resource quotas under a metastore. +// +// ListQuotas returns all quota values under the metastore. There are no SLAs on +// the freshness of the counts returned. This API does not trigger a refresh of +// quota counts. 
+func (a *resourceQuotasImpl) ListQuotasAll(ctx context.Context, request ListQuotasRequest) ([]QuotaInfo, error) { + iterator := a.ListQuotas(ctx, request) + return listing.ToSlice[QuotaInfo](ctx, iterator) +} + +func (a *resourceQuotasImpl) internalListQuotas(ctx context.Context, request ListQuotasRequest) (*ListQuotasResponse, error) { var listQuotasResponse ListQuotasResponse path := "/api/2.1/unity-catalog/resource-quotas/all-resource-quotas" queryParams := make(map[string]any) @@ -983,7 +1485,52 @@ func (a *schemasImpl) Get(ctx context.Context, request GetSchemaRequest) (*Schem return &schemaInfo, err } -func (a *schemasImpl) List(ctx context.Context, request ListSchemasRequest) (*ListSchemasResponse, error) { +// List schemas. +// +// Gets an array of schemas for a catalog in the metastore. If the caller is the +// metastore admin or the owner of the parent catalog, all schemas for the +// catalog will be retrieved. Otherwise, only schemas owned by the caller (or +// for which the caller has the **USE_SCHEMA** privilege) will be retrieved. +// There is no guarantee of a specific ordering of the elements in the array. +func (a *schemasImpl) List(ctx context.Context, request ListSchemasRequest) listing.Iterator[SchemaInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListSchemasRequest) (*ListSchemasResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListSchemasResponse) []SchemaInfo { + return resp.Schemas + } + getNextReq := func(resp *ListSchemasResponse) *ListSchemasRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List schemas. +// +// Gets an array of schemas for a catalog in the metastore. 
If the caller is the +// metastore admin or the owner of the parent catalog, all schemas for the +// catalog will be retrieved. Otherwise, only schemas owned by the caller (or +// for which the caller has the **USE_SCHEMA** privilege) will be retrieved. +// There is no guarantee of a specific ordering of the elements in the array. +func (a *schemasImpl) ListAll(ctx context.Context, request ListSchemasRequest) ([]SchemaInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[SchemaInfo](ctx, iterator) +} + +func (a *schemasImpl) internalList(ctx context.Context, request ListSchemasRequest) (*ListSchemasResponse, error) { var listSchemasResponse ListSchemasResponse path := "/api/2.1/unity-catalog/schemas" queryParams := make(map[string]any) @@ -1040,7 +1587,52 @@ func (a *storageCredentialsImpl) Get(ctx context.Context, request GetStorageCred return &storageCredentialInfo, err } -func (a *storageCredentialsImpl) List(ctx context.Context, request ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { +// List credentials. +// +// Gets an array of storage credentials (as __StorageCredentialInfo__ objects). +// The array is limited to only those storage credentials the caller has +// permission to access. If the caller is a metastore admin, retrieval of +// credentials is unrestricted. There is no guarantee of a specific ordering of +// the elements in the array. 
+func (a *storageCredentialsImpl) List(ctx context.Context, request ListStorageCredentialsRequest) listing.Iterator[StorageCredentialInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListStorageCredentialsResponse) []StorageCredentialInfo { + return resp.StorageCredentials + } + getNextReq := func(resp *ListStorageCredentialsResponse) *ListStorageCredentialsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List credentials. +// +// Gets an array of storage credentials (as __StorageCredentialInfo__ objects). +// The array is limited to only those storage credentials the caller has +// permission to access. If the caller is a metastore admin, retrieval of +// credentials is unrestricted. There is no guarantee of a specific ordering of +// the elements in the array. 
+func (a *storageCredentialsImpl) ListAll(ctx context.Context, request ListStorageCredentialsRequest) ([]StorageCredentialInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[StorageCredentialInfo](ctx, iterator) +} + +func (a *storageCredentialsImpl) internalList(ctx context.Context, request ListStorageCredentialsRequest) (*ListStorageCredentialsResponse, error) { var listStorageCredentialsResponse ListStorageCredentialsResponse path := "/api/2.1/unity-catalog/storage-credentials" queryParams := make(map[string]any) @@ -1097,7 +1689,46 @@ func (a *systemSchemasImpl) Enable(ctx context.Context, request EnableRequest) e return err } -func (a *systemSchemasImpl) List(ctx context.Context, request ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { +// List system schemas. +// +// Gets an array of system schemas for a metastore. The caller must be an +// account admin or a metastore admin. +func (a *systemSchemasImpl) List(ctx context.Context, request ListSystemSchemasRequest) listing.Iterator[SystemSchemaInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListSystemSchemasResponse) []SystemSchemaInfo { + return resp.Schemas + } + getNextReq := func(resp *ListSystemSchemasResponse) *ListSystemSchemasRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List system schemas. +// +// Gets an array of system schemas for a metastore. The caller must be an +// account admin or a metastore admin. 
+func (a *systemSchemasImpl) ListAll(ctx context.Context, request ListSystemSchemasRequest) ([]SystemSchemaInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[SystemSchemaInfo](ctx, iterator) +} + +func (a *systemSchemasImpl) internalList(ctx context.Context, request ListSystemSchemasRequest) (*ListSystemSchemasResponse, error) { var listSystemSchemasResponse ListSystemSchemasResponse path := fmt.Sprintf("/api/2.1/unity-catalog/metastores/%v/systemschemas", request.MetastoreId) queryParams := make(map[string]any) @@ -1168,7 +1799,54 @@ func (a *tablesImpl) Get(ctx context.Context, request GetTableRequest) (*TableIn return &tableInfo, err } -func (a *tablesImpl) List(ctx context.Context, request ListTablesRequest) (*ListTablesResponse, error) { +// List tables. +// +// Gets an array of all tables for the current metastore under the parent +// catalog and schema. The caller must be a metastore admin or an owner of (or +// have the **SELECT** privilege on) the table. For the latter case, the caller +// must also be the owner or have the **USE_CATALOG** privilege on the parent +// catalog and the **USE_SCHEMA** privilege on the parent schema. There is no +// guarantee of a specific ordering of the elements in the array. 
+func (a *tablesImpl) List(ctx context.Context, request ListTablesRequest) listing.Iterator[TableInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListTablesRequest) (*ListTablesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListTablesResponse) []TableInfo { + return resp.Tables + } + getNextReq := func(resp *ListTablesResponse) *ListTablesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List tables. +// +// Gets an array of all tables for the current metastore under the parent +// catalog and schema. The caller must be a metastore admin or an owner of (or +// have the **SELECT** privilege on) the table. For the latter case, the caller +// must also be the owner or have the **USE_CATALOG** privilege on the parent +// catalog and the **USE_SCHEMA** privilege on the parent schema. There is no +// guarantee of a specific ordering of the elements in the array. +func (a *tablesImpl) ListAll(ctx context.Context, request ListTablesRequest) ([]TableInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[TableInfo](ctx, iterator) +} + +func (a *tablesImpl) internalList(ctx context.Context, request ListTablesRequest) (*ListTablesResponse, error) { var listTablesResponse ListTablesResponse path := "/api/2.1/unity-catalog/tables" queryParams := make(map[string]any) @@ -1178,7 +1856,64 @@ func (a *tablesImpl) List(ctx context.Context, request ListTablesRequest) (*List return &listTablesResponse, err } -func (a *tablesImpl) ListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error) { +// List table summaries. 
+// +// Gets an array of summaries for tables for a schema and catalog within the +// metastore. The table summaries returned are either: +// +// * summaries for tables (within the current metastore and parent catalog and +// schema), when the user is a metastore admin, or: * summaries for tables and +// schemas (within the current metastore and parent catalog) for which the user +// has ownership or the **SELECT** privilege on the table and ownership or +// **USE_SCHEMA** privilege on the schema, provided that the user also has +// ownership or the **USE_CATALOG** privilege on the parent catalog. +// +// There is no guarantee of a specific ordering of the elements in the array. +func (a *tablesImpl) ListSummaries(ctx context.Context, request ListSummariesRequest) listing.Iterator[TableSummary] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListSummariesRequest) (*ListTableSummariesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSummaries(ctx, req) + } + getItems := func(resp *ListTableSummariesResponse) []TableSummary { + return resp.Tables + } + getNextReq := func(resp *ListTableSummariesResponse) *ListSummariesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List table summaries. +// +// Gets an array of summaries for tables for a schema and catalog within the +// metastore. 
The table summaries returned are either: +// +// * summaries for tables (within the current metastore and parent catalog and +// schema), when the user is a metastore admin, or: * summaries for tables and +// schemas (within the current metastore and parent catalog) for which the user +// has ownership or the **SELECT** privilege on the table and ownership or +// **USE_SCHEMA** privilege on the schema, provided that the user also has +// ownership or the **USE_CATALOG** privilege on the parent catalog. +// +// There is no guarantee of a specific ordering of the elements in the array. +func (a *tablesImpl) ListSummariesAll(ctx context.Context, request ListSummariesRequest) ([]TableSummary, error) { + iterator := a.ListSummaries(ctx, request) + return listing.ToSlice[TableSummary](ctx, iterator) +} + +func (a *tablesImpl) internalListSummaries(ctx context.Context, request ListSummariesRequest) (*ListTableSummariesResponse, error) { var listTableSummariesResponse ListTableSummariesResponse path := "/api/2.1/unity-catalog/table-summaries" queryParams := make(map[string]any) @@ -1240,7 +1975,62 @@ func (a *volumesImpl) Delete(ctx context.Context, request DeleteVolumeRequest) e return err } -func (a *volumesImpl) List(ctx context.Context, request ListVolumesRequest) (*ListVolumesResponseContent, error) { +// List Volumes. +// +// Gets an array of volumes for the current metastore under the parent catalog +// and schema. +// +// The returned volumes are filtered based on the privileges of the calling +// user. For example, the metastore admin is able to list all the volumes. A +// regular user needs to be the owner or have the **READ VOLUME** privilege on +// the volume to recieve the volumes in the response. For the latter case, the +// caller must also be the owner or have the **USE_CATALOG** privilege on the +// parent catalog and the **USE_SCHEMA** privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the array. 
+func (a *volumesImpl) List(ctx context.Context, request ListVolumesRequest) listing.Iterator[VolumeInfo] { + + getNextPage := func(ctx context.Context, req ListVolumesRequest) (*ListVolumesResponseContent, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListVolumesResponseContent) []VolumeInfo { + return resp.Volumes + } + getNextReq := func(resp *ListVolumesResponseContent) *ListVolumesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List Volumes. +// +// Gets an array of volumes for the current metastore under the parent catalog +// and schema. +// +// The returned volumes are filtered based on the privileges of the calling +// user. For example, the metastore admin is able to list all the volumes. A +// regular user needs to be the owner or have the **READ VOLUME** privilege on +// the volume to recieve the volumes in the response. For the latter case, the +// caller must also be the owner or have the **USE_CATALOG** privilege on the +// parent catalog and the **USE_SCHEMA** privilege on the parent schema. +// +// There is no guarantee of a specific ordering of the elements in the array. 
+func (a *volumesImpl) ListAll(ctx context.Context, request ListVolumesRequest) ([]VolumeInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[VolumeInfo](ctx, iterator) +} + +func (a *volumesImpl) internalList(ctx context.Context, request ListVolumesRequest) (*ListVolumesResponseContent, error) { var listVolumesResponseContent ListVolumesResponseContent path := "/api/2.1/unity-catalog/volumes" queryParams := make(map[string]any) @@ -1286,7 +2076,44 @@ func (a *workspaceBindingsImpl) Get(ctx context.Context, request GetWorkspaceBin return ¤tWorkspaceBindings, err } -func (a *workspaceBindingsImpl) GetBindings(ctx context.Context, request GetBindingsRequest) (*WorkspaceBindingsResponse, error) { +// Get securable workspace bindings. +// +// Gets workspace bindings of the securable. The caller must be a metastore +// admin or an owner of the securable. +func (a *workspaceBindingsImpl) GetBindings(ctx context.Context, request GetBindingsRequest) listing.Iterator[WorkspaceBinding] { + + getNextPage := func(ctx context.Context, req GetBindingsRequest) (*WorkspaceBindingsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalGetBindings(ctx, req) + } + getItems := func(resp *WorkspaceBindingsResponse) []WorkspaceBinding { + return resp.Bindings + } + getNextReq := func(resp *WorkspaceBindingsResponse) *GetBindingsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get securable workspace bindings. +// +// Gets workspace bindings of the securable. The caller must be a metastore +// admin or an owner of the securable. 
+func (a *workspaceBindingsImpl) GetBindingsAll(ctx context.Context, request GetBindingsRequest) ([]WorkspaceBinding, error) { + iterator := a.GetBindings(ctx, request) + return listing.ToSlice[WorkspaceBinding](ctx, iterator) +} + +func (a *workspaceBindingsImpl) internalGetBindings(ctx context.Context, request GetBindingsRequest) (*WorkspaceBindingsResponse, error) { var workspaceBindingsResponse WorkspaceBindingsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/bindings/%v/%v", request.SecurableType, request.SecurableName) queryParams := make(map[string]any) diff --git a/service/cleanrooms/api.go b/service/cleanrooms/api.go index df4c63937..912b9483a 100755 --- a/service/cleanrooms/api.go +++ b/service/cleanrooms/api.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" - "github.com/databricks/databricks-sdk-go/useragent" ) type CleanRoomAssetsInterface interface { @@ -98,44 +97,9 @@ func (a *CleanRoomAssetsAPI) GetByCleanRoomNameAndAssetTypeAndAssetFullName(ctx }) } -// List assets. -// -// This method is generated by Databricks SDK Code Generator. -func (a *CleanRoomAssetsAPI) List(ctx context.Context, request ListCleanRoomAssetsRequest) listing.Iterator[CleanRoomAsset] { - - getNextPage := func(ctx context.Context, req ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.cleanRoomAssetsImpl.List(ctx, req) - } - getItems := func(resp *ListCleanRoomAssetsResponse) []CleanRoomAsset { - return resp.Assets - } - getNextReq := func(resp *ListCleanRoomAssetsResponse) *ListCleanRoomAssetsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List assets. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *CleanRoomAssetsAPI) ListAll(ctx context.Context, request ListCleanRoomAssetsRequest) ([]CleanRoomAsset, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[CleanRoomAsset](ctx, iterator) -} - // List assets. func (a *CleanRoomAssetsAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomAssetsResponse, error) { - return a.cleanRoomAssetsImpl.List(ctx, ListCleanRoomAssetsRequest{ + return a.cleanRoomAssetsImpl.internalList(ctx, ListCleanRoomAssetsRequest{ CleanRoomName: cleanRoomName, }) } @@ -175,50 +139,11 @@ type CleanRoomTaskRunsAPI struct { cleanRoomTaskRunsImpl } -// List notebook task runs. -// -// List all the historical notebook task runs in a clean room. -// -// This method is generated by Databricks SDK Code Generator. -func (a *CleanRoomTaskRunsAPI) List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) listing.Iterator[CleanRoomNotebookTaskRun] { - - getNextPage := func(ctx context.Context, req ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.cleanRoomTaskRunsImpl.List(ctx, req) - } - getItems := func(resp *ListCleanRoomNotebookTaskRunsResponse) []CleanRoomNotebookTaskRun { - return resp.Runs - } - getNextReq := func(resp *ListCleanRoomNotebookTaskRunsResponse) *ListCleanRoomNotebookTaskRunsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List notebook task runs. -// -// List all the historical notebook task runs in a clean room. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *CleanRoomTaskRunsAPI) ListAll(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) ([]CleanRoomNotebookTaskRun, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[CleanRoomNotebookTaskRun](ctx, iterator) -} - // List notebook task runs. // // List all the historical notebook task runs in a clean room. func (a *CleanRoomTaskRunsAPI) ListByCleanRoomName(ctx context.Context, cleanRoomName string) (*ListCleanRoomNotebookTaskRunsResponse, error) { - return a.cleanRoomTaskRunsImpl.List(ctx, ListCleanRoomNotebookTaskRunsRequest{ + return a.cleanRoomTaskRunsImpl.internalList(ctx, ListCleanRoomNotebookTaskRunsRequest{ CleanRoomName: cleanRoomName, }) } @@ -330,44 +255,3 @@ func (a *CleanRoomsAPI) GetByName(ctx context.Context, name string) (*CleanRoom, Name: name, }) } - -// List clean rooms. -// -// Get a list of all clean rooms of the metastore. Only clean rooms the caller -// has access to are returned. -// -// This method is generated by Databricks SDK Code Generator. -func (a *CleanRoomsAPI) List(ctx context.Context, request ListCleanRoomsRequest) listing.Iterator[CleanRoom] { - - getNextPage := func(ctx context.Context, req ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.cleanRoomsImpl.List(ctx, req) - } - getItems := func(resp *ListCleanRoomsResponse) []CleanRoom { - return resp.CleanRooms - } - getNextReq := func(resp *ListCleanRoomsResponse) *ListCleanRoomsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List clean rooms. -// -// Get a list of all clean rooms of the metastore. Only clean rooms the caller -// has access to are returned. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *CleanRoomsAPI) ListAll(ctx context.Context, request ListCleanRoomsRequest) ([]CleanRoom, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[CleanRoom](ctx, iterator) -} diff --git a/service/cleanrooms/impl.go b/service/cleanrooms/impl.go index 5e47e439a..9132d6140 100755 --- a/service/cleanrooms/impl.go +++ b/service/cleanrooms/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just CleanRoomAssets API methods @@ -46,7 +48,38 @@ func (a *cleanRoomAssetsImpl) Get(ctx context.Context, request GetCleanRoomAsset return &cleanRoomAsset, err } -func (a *cleanRoomAssetsImpl) List(ctx context.Context, request ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { +// List assets. +func (a *cleanRoomAssetsImpl) List(ctx context.Context, request ListCleanRoomAssetsRequest) listing.Iterator[CleanRoomAsset] { + + getNextPage := func(ctx context.Context, req ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCleanRoomAssetsResponse) []CleanRoomAsset { + return resp.Assets + } + getNextReq := func(resp *ListCleanRoomAssetsResponse) *ListCleanRoomAssetsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List assets. 
+func (a *cleanRoomAssetsImpl) ListAll(ctx context.Context, request ListCleanRoomAssetsRequest) ([]CleanRoomAsset, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CleanRoomAsset](ctx, iterator) +} + +func (a *cleanRoomAssetsImpl) internalList(ctx context.Context, request ListCleanRoomAssetsRequest) (*ListCleanRoomAssetsResponse, error) { var listCleanRoomAssetsResponse ListCleanRoomAssetsResponse path := fmt.Sprintf("/api/2.0/clean-rooms/%v/assets", request.CleanRoomName) queryParams := make(map[string]any) @@ -72,7 +105,42 @@ type cleanRoomTaskRunsImpl struct { client *client.DatabricksClient } -func (a *cleanRoomTaskRunsImpl) List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { +// List notebook task runs. +// +// List all the historical notebook task runs in a clean room. +func (a *cleanRoomTaskRunsImpl) List(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) listing.Iterator[CleanRoomNotebookTaskRun] { + + getNextPage := func(ctx context.Context, req ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCleanRoomNotebookTaskRunsResponse) []CleanRoomNotebookTaskRun { + return resp.Runs + } + getNextReq := func(resp *ListCleanRoomNotebookTaskRunsResponse) *ListCleanRoomNotebookTaskRunsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List notebook task runs. +// +// List all the historical notebook task runs in a clean room. 
+func (a *cleanRoomTaskRunsImpl) ListAll(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) ([]CleanRoomNotebookTaskRun, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CleanRoomNotebookTaskRun](ctx, iterator) +} + +func (a *cleanRoomTaskRunsImpl) internalList(ctx context.Context, request ListCleanRoomNotebookTaskRunsRequest) (*ListCleanRoomNotebookTaskRunsResponse, error) { var listCleanRoomNotebookTaskRunsResponse ListCleanRoomNotebookTaskRunsResponse path := fmt.Sprintf("/api/2.0/clean-rooms/%v/runs", request.CleanRoomName) queryParams := make(map[string]any) @@ -129,7 +197,44 @@ func (a *cleanRoomsImpl) Get(ctx context.Context, request GetCleanRoomRequest) ( return &cleanRoom, err } -func (a *cleanRoomsImpl) List(ctx context.Context, request ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { +// List clean rooms. +// +// Get a list of all clean rooms of the metastore. Only clean rooms the caller +// has access to are returned. +func (a *cleanRoomsImpl) List(ctx context.Context, request ListCleanRoomsRequest) listing.Iterator[CleanRoom] { + + getNextPage := func(ctx context.Context, req ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListCleanRoomsResponse) []CleanRoom { + return resp.CleanRooms + } + getNextReq := func(resp *ListCleanRoomsResponse) *ListCleanRoomsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List clean rooms. +// +// Get a list of all clean rooms of the metastore. Only clean rooms the caller +// has access to are returned. 
+func (a *cleanRoomsImpl) ListAll(ctx context.Context, request ListCleanRoomsRequest) ([]CleanRoom, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[CleanRoom](ctx, iterator) +} + +func (a *cleanRoomsImpl) internalList(ctx context.Context, request ListCleanRoomsRequest) (*ListCleanRoomsResponse, error) { var listCleanRoomsResponse ListCleanRoomsResponse path := "/api/2.0/clean-rooms" queryParams := make(map[string]any) diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index cf4a225a5..44aeb6ae6 100755 --- a/service/cleanrooms/model.go +++ b/service/cleanrooms/model.go @@ -196,12 +196,6 @@ type CleanRoomAssetNotebook struct { // Base 64 representation of the notebook contents. This is the same format // as returned by :method:workspace/export with the format of **HTML**. NotebookContent string `json:"notebook_content,omitempty"` - // top-level status derived from all reviews - ReviewState CleanRoomNotebookReviewNotebookReviewState `json:"review_state,omitempty"` - // All existing approvals or rejections - Reviews []CleanRoomNotebookReview `json:"reviews,omitempty"` - // collaborators that can run the notebook - RunnerCollaborators []CleanRoomCollaborator `json:"runner_collaborators,omitempty"` ForceSendFields []string `json:"-"` } @@ -346,56 +340,6 @@ func (s CleanRoomCollaborator) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type CleanRoomNotebookReview struct { - // review comment - Comment string `json:"comment,omitempty"` - // timestamp of when the review was submitted - CreatedAtMillis int64 `json:"created_at_millis,omitempty"` - // review outcome - ReviewState CleanRoomNotebookReviewNotebookReviewState `json:"review_state,omitempty"` - // collaborator alias of the reviewer - ReviewerCollaboratorAlias string `json:"reviewer_collaborator_alias,omitempty"` - - ForceSendFields []string `json:"-"` -} - -func (s *CleanRoomNotebookReview) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - 
-func (s CleanRoomNotebookReview) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -type CleanRoomNotebookReviewNotebookReviewState string - -const CleanRoomNotebookReviewNotebookReviewStateApproved CleanRoomNotebookReviewNotebookReviewState = `APPROVED` - -const CleanRoomNotebookReviewNotebookReviewStatePending CleanRoomNotebookReviewNotebookReviewState = `PENDING` - -const CleanRoomNotebookReviewNotebookReviewStateRejected CleanRoomNotebookReviewNotebookReviewState = `REJECTED` - -// String representation for [fmt.Print] -func (f *CleanRoomNotebookReviewNotebookReviewState) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *CleanRoomNotebookReviewNotebookReviewState) Set(v string) error { - switch v { - case `APPROVED`, `PENDING`, `REJECTED`: - *f = CleanRoomNotebookReviewNotebookReviewState(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "APPROVED", "PENDING", "REJECTED"`, v) - } -} - -// Type always returns CleanRoomNotebookReviewNotebookReviewState to satisfy [pflag.Value] interface -func (f *CleanRoomNotebookReviewNotebookReviewState) Type() string { - return "CleanRoomNotebookReviewNotebookReviewState" -} - // Stores information about a single task run. type CleanRoomNotebookTaskRun struct { // Job run info of the task in the runner's local workspace. This field is diff --git a/service/compute/api.go b/service/compute/api.go index 89b6b6e61..4e834e706 100755 --- a/service/compute/api.go +++ b/service/compute/api.go @@ -193,39 +193,6 @@ func (a *ClusterPoliciesAPI) GetPermissionsByClusterPolicyId(ctx context.Context }) } -// List cluster policies. -// -// Returns a list of policies accessible by the requesting user. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ClusterPoliciesAPI) List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy] { - - getNextPage := func(ctx context.Context, req ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.clusterPoliciesImpl.List(ctx, req) - } - getItems := func(resp *ListPoliciesResponse) []Policy { - return resp.Policies - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List cluster policies. -// -// Returns a list of policies accessible by the requesting user. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ClusterPoliciesAPI) ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[Policy](ctx, iterator) -} - // PolicyNameToPolicyIdMap calls [ClusterPoliciesAPI.ListAll] and creates a map of results with [Policy].Name as key and [Policy].PolicyId as value. // // Returns an error if there's more than one [Policy] with the same .Name. @@ -954,51 +921,6 @@ func (a *ClustersAPI) EditAndWait(ctx context.Context, editCluster EditCluster, return wait.Get() } -// List cluster activity events. -// -// Retrieves a list of events about the activity of a cluster. This API is -// paginated. If there are more events to read, the response includes all the -// nparameters necessary to request the next page of events. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ClustersAPI) Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] { - - getNextPage := func(ctx context.Context, req GetEvents) (*GetEventsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.clustersImpl.Events(ctx, req) - } - getItems := func(resp *GetEventsResponse) []ClusterEvent { - return resp.Events - } - getNextReq := func(resp *GetEventsResponse) *GetEvents { - if resp.NextPage == nil { - return nil - } - request = *resp.NextPage - - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List cluster activity events. -// -// Retrieves a list of events about the activity of a cluster. This API is -// paginated. If there are more events to read, the response includes all the -// nparameters necessary to request the next page of events. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ClustersAPI) EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) { - iterator := a.Events(ctx, request) - return listing.ToSliceN[ClusterEvent, int64](ctx, iterator, request.Limit) - -} - // Get cluster info. // // Retrieves the information for a cluster given its identifier. Clusters can be @@ -1028,50 +950,6 @@ func (a *ClustersAPI) GetPermissionsByClusterId(ctx context.Context, clusterId s }) } -// List clusters. -// -// Return information about all pinned and active clusters, and all clusters -// terminated within the last 30 days. Clusters terminated prior to this period -// are not included. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ClustersAPI) List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails] { - - getNextPage := func(ctx context.Context, req ListClustersRequest) (*ListClustersResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.clustersImpl.List(ctx, req) - } - getItems := func(resp *ListClustersResponse) []ClusterDetails { - return resp.Clusters - } - getNextReq := func(resp *ListClustersResponse) *ListClustersRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List clusters. -// -// Return information about all pinned and active clusters, and all clusters -// terminated within the last 30 days. Clusters terminated prior to this period -// are not included. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ClustersAPI) ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[ClusterDetails, int](ctx, iterator, request.PageSize) - -} - // ClusterDetailsClusterNameToClusterIdMap calls [ClustersAPI.ListAll] and creates a map of results with [ClusterDetails].ClusterName as key and [ClusterDetails].ClusterId as value. // // Returns an error if there's more than one [ClusterDetails] with the same .ClusterName. @@ -1919,46 +1797,6 @@ func (a *GlobalInitScriptsAPI) GetByScriptId(ctx context.Context, scriptId strin }) } -// Get init scripts. -// -// Get a list of all global init scripts for this workspace. This returns all -// properties for each script but **not** the script contents. To retrieve the -// contents of a script, use the [get a global init -// script](:method:globalinitscripts/get) operation. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *GlobalInitScriptsAPI) List(ctx context.Context) listing.Iterator[GlobalInitScriptDetails] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListGlobalInitScriptsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.globalInitScriptsImpl.List(ctx) - } - getItems := func(resp *ListGlobalInitScriptsResponse) []GlobalInitScriptDetails { - return resp.Scripts - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get init scripts. -// -// Get a list of all global init scripts for this workspace. This returns all -// properties for each script but **not** the script contents. To retrieve the -// contents of a script, use the [get a global init -// script](:method:globalinitscripts/get) operation. -// -// This method is generated by Databricks SDK Code Generator. -func (a *GlobalInitScriptsAPI) ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error) { - iterator := a.List(ctx) - return listing.ToSlice[GlobalInitScriptDetails](ctx, iterator) -} - // GlobalInitScriptDetailsNameToScriptIdMap calls [GlobalInitScriptsAPI.ListAll] and creates a map of results with [GlobalInitScriptDetails].Name as key and [GlobalInitScriptDetails].ScriptId as value. // // Returns an error if there's more than one [GlobalInitScriptDetails] with the same .Name. @@ -2182,40 +2020,6 @@ func (a *InstancePoolsAPI) GetPermissionsByInstancePoolId(ctx context.Context, i }) } -// List instance pool info. -// -// Gets a list of instance pools with their statistics. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *InstancePoolsAPI) List(ctx context.Context) listing.Iterator[InstancePoolAndStats] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListInstancePools, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.instancePoolsImpl.List(ctx) - } - getItems := func(resp *ListInstancePools) []InstancePoolAndStats { - return resp.InstancePools - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List instance pool info. -// -// Gets a list of instance pools with their statistics. -// -// This method is generated by Databricks SDK Code Generator. -func (a *InstancePoolsAPI) ListAll(ctx context.Context) ([]InstancePoolAndStats, error) { - iterator := a.List(ctx) - return listing.ToSlice[InstancePoolAndStats](ctx, iterator) -} - // InstancePoolAndStatsInstancePoolNameToInstancePoolIdMap calls [InstancePoolsAPI.ListAll] and creates a map of results with [InstancePoolAndStats].InstancePoolName as key and [InstancePoolAndStats].InstancePoolId as value. // // Returns an error if there's more than one [InstancePoolAndStats] with the same .InstancePoolName. @@ -2349,44 +2153,6 @@ type InstanceProfilesAPI struct { instanceProfilesImpl } -// List available instance profiles. -// -// List the instance profiles that the calling user can use to launch a cluster. -// -// This API is available to all users. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *InstanceProfilesAPI) List(ctx context.Context) listing.Iterator[InstanceProfile] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListInstanceProfilesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.instanceProfilesImpl.List(ctx) - } - getItems := func(resp *ListInstanceProfilesResponse) []InstanceProfile { - return resp.InstanceProfiles - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List available instance profiles. -// -// List the instance profiles that the calling user can use to launch a cluster. -// -// This API is available to all users. -// -// This method is generated by Databricks SDK Code Generator. -func (a *InstanceProfilesAPI) ListAll(ctx context.Context) ([]InstanceProfile, error) { - iterator := a.List(ctx) - return listing.ToSlice[InstanceProfile](ctx, iterator) -} - // Remove the instance profile. // // Remove the instance profile with the provided ARN. Existing clusters with @@ -2497,87 +2263,6 @@ type LibrariesAPI struct { librariesImpl } -// Get all statuses. -// -// Get the status of all libraries on all clusters. A status is returned for all -// libraries installed on this cluster via the API or the libraries UI. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LibrariesAPI) AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListAllClusterLibraryStatusesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.librariesImpl.AllClusterStatuses(ctx) - } - getItems := func(resp *ListAllClusterLibraryStatusesResponse) []ClusterLibraryStatuses { - return resp.Statuses - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get all statuses. 
-// -// Get the status of all libraries on all clusters. A status is returned for all -// libraries installed on this cluster via the API or the libraries UI. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LibrariesAPI) AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error) { - iterator := a.AllClusterStatuses(ctx) - return listing.ToSlice[ClusterLibraryStatuses](ctx, iterator) -} - -// Get status. -// -// Get the status of libraries on a cluster. A status is returned for all -// libraries installed on this cluster via the API or the libraries UI. The -// order of returned libraries is as follows: 1. Libraries set to be installed -// on this cluster, in the order that the libraries were added to the cluster, -// are returned first. 2. Libraries that were previously requested to be -// installed on this cluster or, but are now marked for removal, in no -// particular order, are returned last. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LibrariesAPI) ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus] { - - getNextPage := func(ctx context.Context, req ClusterStatus) (*ClusterLibraryStatuses, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.librariesImpl.ClusterStatus(ctx, req) - } - getItems := func(resp *ClusterLibraryStatuses) []LibraryFullStatus { - return resp.LibraryStatuses - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get status. -// -// Get the status of libraries on a cluster. A status is returned for all -// libraries installed on this cluster via the API or the libraries UI. The -// order of returned libraries is as follows: 1. Libraries set to be installed -// on this cluster, in the order that the libraries were added to the cluster, -// are returned first. 2. 
Libraries that were previously requested to be -// installed on this cluster or, but are now marked for removal, in no -// particular order, are returned last. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LibrariesAPI) ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error) { - iterator := a.ClusterStatus(ctx, request) - return listing.ToSlice[LibraryFullStatus](ctx, iterator) -} - // Get status. // // Get the status of libraries on a cluster. A status is returned for all @@ -2588,7 +2273,7 @@ func (a *LibrariesAPI) ClusterStatusAll(ctx context.Context, request ClusterStat // installed on this cluster or, but are now marked for removal, in no // particular order, are returned last. func (a *LibrariesAPI) ClusterStatusByClusterId(ctx context.Context, clusterId string) (*ClusterLibraryStatuses, error) { - return a.librariesImpl.ClusterStatus(ctx, ClusterStatus{ + return a.librariesImpl.internalClusterStatus(ctx, ClusterStatus{ ClusterId: clusterId, }) } @@ -2675,49 +2360,6 @@ func (a *PolicyComplianceForClustersAPI) GetComplianceByClusterId(ctx context.Co }) } -// List cluster policy compliance. -// -// Returns the policy compliance status of all clusters that use a given policy. -// Clusters could be out of compliance if their policy was updated after the -// cluster was last edited. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *PolicyComplianceForClustersAPI) ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) listing.Iterator[ClusterCompliance] { - - getNextPage := func(ctx context.Context, req ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.policyComplianceForClustersImpl.ListCompliance(ctx, req) - } - getItems := func(resp *ListClusterCompliancesResponse) []ClusterCompliance { - return resp.Clusters - } - getNextReq := func(resp *ListClusterCompliancesResponse) *ListClusterCompliancesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List cluster policy compliance. -// -// Returns the policy compliance status of all clusters that use a given policy. -// Clusters could be out of compliance if their policy was updated after the -// cluster was last edited. -// -// This method is generated by Databricks SDK Code Generator. -func (a *PolicyComplianceForClustersAPI) ListComplianceAll(ctx context.Context, request ListClusterCompliancesRequest) ([]ClusterCompliance, error) { - iterator := a.ListCompliance(ctx, request) - return listing.ToSlice[ClusterCompliance](ctx, iterator) -} - type PolicyFamiliesInterface interface { // Get policy family information. @@ -2779,44 +2421,3 @@ func (a *PolicyFamiliesAPI) GetByPolicyFamilyId(ctx context.Context, policyFamil PolicyFamilyId: policyFamilyId, }) } - -// List policy families. -// -// Returns the list of policy definition types available to use at their latest -// version. This API is paginated. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *PolicyFamiliesAPI) List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily] { - - getNextPage := func(ctx context.Context, req ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.policyFamiliesImpl.List(ctx, req) - } - getItems := func(resp *ListPolicyFamiliesResponse) []PolicyFamily { - return resp.PolicyFamilies - } - getNextReq := func(resp *ListPolicyFamiliesResponse) *ListPolicyFamiliesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List policy families. -// -// Returns the list of policy definition types available to use at their latest -// version. This API is paginated. -// -// This method is generated by Databricks SDK Code Generator. -func (a *PolicyFamiliesAPI) ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[PolicyFamily](ctx, iterator) -} diff --git a/service/compute/impl.go b/service/compute/impl.go index 4b4d7f6bb..857e6229f 100755 --- a/service/compute/impl.go +++ b/service/compute/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just ClusterPolicies API methods @@ -78,7 +80,36 @@ func (a *clusterPoliciesImpl) GetPermissions(ctx context.Context, request GetClu return &clusterPolicyPermissions, err } -func (a *clusterPoliciesImpl) List(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { +// List cluster policies. +// +// Returns a list of policies accessible by the requesting user. 
+func (a *clusterPoliciesImpl) List(ctx context.Context, request ListClusterPoliciesRequest) listing.Iterator[Policy] { + + getNextPage := func(ctx context.Context, req ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListPoliciesResponse) []Policy { + return resp.Policies + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List cluster policies. +// +// Returns a list of policies accessible by the requesting user. +func (a *clusterPoliciesImpl) ListAll(ctx context.Context, request ListClusterPoliciesRequest) ([]Policy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Policy](ctx, iterator) +} + +func (a *clusterPoliciesImpl) internalList(ctx context.Context, request ListClusterPoliciesRequest) (*ListPoliciesResponse, error) { var listPoliciesResponse ListPoliciesResponse path := "/api/2.0/policies/clusters/list" queryParams := make(map[string]any) @@ -159,7 +190,48 @@ func (a *clustersImpl) Edit(ctx context.Context, request EditCluster) error { return err } -func (a *clustersImpl) Events(ctx context.Context, request GetEvents) (*GetEventsResponse, error) { +// List cluster activity events. +// +// Retrieves a list of events about the activity of a cluster. This API is +// paginated. If there are more events to read, the response includes all the +// nparameters necessary to request the next page of events. 
+func (a *clustersImpl) Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] { + + getNextPage := func(ctx context.Context, req GetEvents) (*GetEventsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalEvents(ctx, req) + } + getItems := func(resp *GetEventsResponse) []ClusterEvent { + return resp.Events + } + getNextReq := func(resp *GetEventsResponse) *GetEvents { + if resp.NextPage == nil { + return nil + } + request = *resp.NextPage + + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List cluster activity events. +// +// Retrieves a list of events about the activity of a cluster. This API is +// paginated. If there are more events to read, the response includes all the +// nparameters necessary to request the next page of events. +func (a *clustersImpl) EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) { + iterator := a.Events(ctx, request) + return listing.ToSliceN[ClusterEvent, int64](ctx, iterator, request.Limit) + +} + +func (a *clustersImpl) internalEvents(ctx context.Context, request GetEvents) (*GetEventsResponse, error) { var getEventsResponse GetEventsResponse path := "/api/2.1/clusters/events" queryParams := make(map[string]any) @@ -200,7 +272,47 @@ func (a *clustersImpl) GetPermissions(ctx context.Context, request GetClusterPer return &clusterPermissions, err } -func (a *clustersImpl) List(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error) { +// List clusters. +// +// Return information about all pinned and active clusters, and all clusters +// terminated within the last 30 days. Clusters terminated prior to this period +// are not included. 
+func (a *clustersImpl) List(ctx context.Context, request ListClustersRequest) listing.Iterator[ClusterDetails] { + + getNextPage := func(ctx context.Context, req ListClustersRequest) (*ListClustersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListClustersResponse) []ClusterDetails { + return resp.Clusters + } + getNextReq := func(resp *ListClustersResponse) *ListClustersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List clusters. +// +// Return information about all pinned and active clusters, and all clusters +// terminated within the last 30 days. Clusters terminated prior to this period +// are not included. +func (a *clustersImpl) ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[ClusterDetails, int](ctx, iterator, request.PageSize) + +} + +func (a *clustersImpl) internalList(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error) { var listClustersResponse ListClustersResponse path := "/api/2.1/clusters/list" queryParams := make(map[string]any) @@ -443,7 +555,43 @@ func (a *globalInitScriptsImpl) Get(ctx context.Context, request GetGlobalInitSc return &globalInitScriptDetailsWithContent, err } -func (a *globalInitScriptsImpl) List(ctx context.Context) (*ListGlobalInitScriptsResponse, error) { +// Get init scripts. +// +// Get a list of all global init scripts for this workspace. This returns all +// properties for each script but **not** the script contents. To retrieve the +// contents of a script, use the [get a global init +// script](:method:globalinitscripts/get) operation. 
+func (a *globalInitScriptsImpl) List(ctx context.Context) listing.Iterator[GlobalInitScriptDetails] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListGlobalInitScriptsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListGlobalInitScriptsResponse) []GlobalInitScriptDetails { + return resp.Scripts + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get init scripts. +// +// Get a list of all global init scripts for this workspace. This returns all +// properties for each script but **not** the script contents. To retrieve the +// contents of a script, use the [get a global init +// script](:method:globalinitscripts/get) operation. +func (a *globalInitScriptsImpl) ListAll(ctx context.Context) ([]GlobalInitScriptDetails, error) { + iterator := a.List(ctx) + return listing.ToSlice[GlobalInitScriptDetails](ctx, iterator) +} + +func (a *globalInitScriptsImpl) internalList(ctx context.Context) (*ListGlobalInitScriptsResponse, error) { var listGlobalInitScriptsResponse ListGlobalInitScriptsResponse path := "/api/2.0/global-init-scripts" @@ -531,7 +679,37 @@ func (a *instancePoolsImpl) GetPermissions(ctx context.Context, request GetInsta return &instancePoolPermissions, err } -func (a *instancePoolsImpl) List(ctx context.Context) (*ListInstancePools, error) { +// List instance pool info. +// +// Gets a list of instance pools with their statistics. 
+func (a *instancePoolsImpl) List(ctx context.Context) listing.Iterator[InstancePoolAndStats] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListInstancePools, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListInstancePools) []InstancePoolAndStats { + return resp.InstancePools + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List instance pool info. +// +// Gets a list of instance pools with their statistics. +func (a *instancePoolsImpl) ListAll(ctx context.Context) ([]InstancePoolAndStats, error) { + iterator := a.List(ctx) + return listing.ToSlice[InstancePoolAndStats](ctx, iterator) +} + +func (a *instancePoolsImpl) internalList(ctx context.Context) (*ListInstancePools, error) { var listInstancePools ListInstancePools path := "/api/2.0/instance-pools/list" @@ -590,7 +768,41 @@ func (a *instanceProfilesImpl) Edit(ctx context.Context, request InstanceProfile return err } -func (a *instanceProfilesImpl) List(ctx context.Context) (*ListInstanceProfilesResponse, error) { +// List available instance profiles. +// +// List the instance profiles that the calling user can use to launch a cluster. +// +// This API is available to all users. +func (a *instanceProfilesImpl) List(ctx context.Context) listing.Iterator[InstanceProfile] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListInstanceProfilesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListInstanceProfilesResponse) []InstanceProfile { + return resp.InstanceProfiles + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List available instance profiles. +// +// List the instance profiles that the calling user can use to launch a cluster. 
+// +// This API is available to all users. +func (a *instanceProfilesImpl) ListAll(ctx context.Context) ([]InstanceProfile, error) { + iterator := a.List(ctx) + return listing.ToSlice[InstanceProfile](ctx, iterator) +} + +func (a *instanceProfilesImpl) internalList(ctx context.Context) (*ListInstanceProfilesResponse, error) { var listInstanceProfilesResponse ListInstanceProfilesResponse path := "/api/2.0/instance-profiles/list" @@ -616,7 +828,39 @@ type librariesImpl struct { client *client.DatabricksClient } -func (a *librariesImpl) AllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) { +// Get all statuses. +// +// Get the status of all libraries on all clusters. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. +func (a *librariesImpl) AllClusterStatuses(ctx context.Context) listing.Iterator[ClusterLibraryStatuses] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListAllClusterLibraryStatusesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalAllClusterStatuses(ctx) + } + getItems := func(resp *ListAllClusterLibraryStatusesResponse) []ClusterLibraryStatuses { + return resp.Statuses + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all statuses. +// +// Get the status of all libraries on all clusters. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. 
+func (a *librariesImpl) AllClusterStatusesAll(ctx context.Context) ([]ClusterLibraryStatuses, error) { + iterator := a.AllClusterStatuses(ctx) + return listing.ToSlice[ClusterLibraryStatuses](ctx, iterator) +} + +func (a *librariesImpl) internalAllClusterStatuses(ctx context.Context) (*ListAllClusterLibraryStatusesResponse, error) { var listAllClusterLibraryStatusesResponse ListAllClusterLibraryStatusesResponse path := "/api/2.0/libraries/all-cluster-statuses" @@ -626,7 +870,48 @@ func (a *librariesImpl) AllClusterStatuses(ctx context.Context) (*ListAllCluster return &listAllClusterLibraryStatusesResponse, err } -func (a *librariesImpl) ClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterLibraryStatuses, error) { +// Get status. +// +// Get the status of libraries on a cluster. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. The +// order of returned libraries is as follows: 1. Libraries set to be installed +// on this cluster, in the order that the libraries were added to the cluster, +// are returned first. 2. Libraries that were previously requested to be +// installed on this cluster or, but are now marked for removal, in no +// particular order, are returned last. +func (a *librariesImpl) ClusterStatus(ctx context.Context, request ClusterStatus) listing.Iterator[LibraryFullStatus] { + + getNextPage := func(ctx context.Context, req ClusterStatus) (*ClusterLibraryStatuses, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalClusterStatus(ctx, req) + } + getItems := func(resp *ClusterLibraryStatuses) []LibraryFullStatus { + return resp.LibraryStatuses + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get status. +// +// Get the status of libraries on a cluster. A status is returned for all +// libraries installed on this cluster via the API or the libraries UI. 
The +// order of returned libraries is as follows: 1. Libraries set to be installed +// on this cluster, in the order that the libraries were added to the cluster, +// are returned first. 2. Libraries that were previously requested to be +// installed on this cluster or, but are now marked for removal, in no +// particular order, are returned last. +func (a *librariesImpl) ClusterStatusAll(ctx context.Context, request ClusterStatus) ([]LibraryFullStatus, error) { + iterator := a.ClusterStatus(ctx, request) + return listing.ToSlice[LibraryFullStatus](ctx, iterator) +} + +func (a *librariesImpl) internalClusterStatus(ctx context.Context, request ClusterStatus) (*ClusterLibraryStatuses, error) { var clusterLibraryStatuses ClusterLibraryStatuses path := "/api/2.0/libraries/cluster-status" queryParams := make(map[string]any) @@ -684,7 +969,46 @@ func (a *policyComplianceForClustersImpl) GetCompliance(ctx context.Context, req return &getClusterComplianceResponse, err } -func (a *policyComplianceForClustersImpl) ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { +// List cluster policy compliance. +// +// Returns the policy compliance status of all clusters that use a given policy. +// Clusters could be out of compliance if their policy was updated after the +// cluster was last edited. 
+func (a *policyComplianceForClustersImpl) ListCompliance(ctx context.Context, request ListClusterCompliancesRequest) listing.Iterator[ClusterCompliance] { + + getNextPage := func(ctx context.Context, req ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListCompliance(ctx, req) + } + getItems := func(resp *ListClusterCompliancesResponse) []ClusterCompliance { + return resp.Clusters + } + getNextReq := func(resp *ListClusterCompliancesResponse) *ListClusterCompliancesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List cluster policy compliance. +// +// Returns the policy compliance status of all clusters that use a given policy. +// Clusters could be out of compliance if their policy was updated after the +// cluster was last edited. +func (a *policyComplianceForClustersImpl) ListComplianceAll(ctx context.Context, request ListClusterCompliancesRequest) ([]ClusterCompliance, error) { + iterator := a.ListCompliance(ctx, request) + return listing.ToSlice[ClusterCompliance](ctx, iterator) +} + +func (a *policyComplianceForClustersImpl) internalListCompliance(ctx context.Context, request ListClusterCompliancesRequest) (*ListClusterCompliancesResponse, error) { var listClusterCompliancesResponse ListClusterCompliancesResponse path := "/api/2.0/policies/clusters/list-compliance" queryParams := make(map[string]any) @@ -709,7 +1033,44 @@ func (a *policyFamiliesImpl) Get(ctx context.Context, request GetPolicyFamilyReq return &policyFamily, err } -func (a *policyFamiliesImpl) List(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { +// List policy families. 
+// +// Returns the list of policy definition types available to use at their latest +// version. This API is paginated. +func (a *policyFamiliesImpl) List(ctx context.Context, request ListPolicyFamiliesRequest) listing.Iterator[PolicyFamily] { + + getNextPage := func(ctx context.Context, req ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListPolicyFamiliesResponse) []PolicyFamily { + return resp.PolicyFamilies + } + getNextReq := func(resp *ListPolicyFamiliesResponse) *ListPolicyFamiliesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List policy families. +// +// Returns the list of policy definition types available to use at their latest +// version. This API is paginated. +func (a *policyFamiliesImpl) ListAll(ctx context.Context, request ListPolicyFamiliesRequest) ([]PolicyFamily, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PolicyFamily](ctx, iterator) +} + +func (a *policyFamiliesImpl) internalList(ctx context.Context, request ListPolicyFamiliesRequest) (*ListPolicyFamiliesResponse, error) { var listPolicyFamiliesResponse ListPolicyFamiliesResponse path := "/api/2.0/policy-families" queryParams := make(map[string]any) diff --git a/service/compute/model.go b/service/compute/model.go index fbf873ca2..4bb1066a4 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -417,11 +417,12 @@ type ClusterAttributes struct { // specified at cluster creation, a set of default values will be used. AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` // The configuration for delivering spark logs to a long-term storage - // destination. Two kinds of destinations (dbfs and s3) are supported. 
Only - // one destination can be specified for one cluster. If the conf is given, - // the logs will be delivered to the destination every `5 mins`. The - // destination of driver logs is `$destination/$clusterId/driver`, while the - // destination of executor logs is `$destination/$clusterId/executor`. + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. @@ -620,11 +621,12 @@ type ClusterDetails struct { // restarts and resizes, while each new cluster has a globally unique id. ClusterId string `json:"cluster_id,omitempty"` // The configuration for delivering spark logs to a long-term storage - // destination. Two kinds of destinations (dbfs and s3) are supported. Only - // one destination can be specified for one cluster. If the conf is given, - // the logs will be delivered to the destination every `5 mins`. The - // destination of driver logs is `$destination/$clusterId/driver`, while the - // destination of executor logs is `$destination/$clusterId/executor`. + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. 
ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` // Cluster log delivery status. ClusterLogStatus *LogSyncStatus `json:"cluster_log_status,omitempty"` @@ -899,6 +901,9 @@ type ClusterLogConf struct { // the cluster iam role in `instance_profile_arn` has permission to write // data to the s3 destination. S3 *S3StorageInfo `json:"s3,omitempty"` + // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // "/Volumes/catalog/schema/volume/cluster_log" } }` + Volumes *VolumesStorageInfo `json:"volumes,omitempty"` } type ClusterPermission struct { @@ -1234,11 +1239,12 @@ type ClusterSpec struct { // specified at cluster creation, a set of default values will be used. AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` // The configuration for delivering spark logs to a long-term storage - // destination. Two kinds of destinations (dbfs and s3) are supported. Only - // one destination can be specified for one cluster. If the conf is given, - // the logs will be delivered to the destination every `5 mins`. The - // destination of driver logs is `$destination/$clusterId/driver`, while the - // destination of executor logs is `$destination/$clusterId/executor`. + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. @@ -1566,11 +1572,12 @@ type CreateCluster struct { // creation of a new cluster. 
CloneFrom *CloneCluster `json:"clone_from,omitempty"` // The configuration for delivering spark logs to a long-term storage - // destination. Two kinds of destinations (dbfs and s3) are supported. Only - // one destination can be specified for one cluster. If the conf is given, - // the logs will be delivered to the destination every `5 mins`. The - // destination of driver logs is `$destination/$clusterId/driver`, while the - // destination of executor logs is `$destination/$clusterId/executor`. + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. @@ -2339,11 +2346,12 @@ type EditCluster struct { // ID of the cluster ClusterId string `json:"cluster_id"` // The configuration for delivering spark logs to a long-term storage - // destination. Two kinds of destinations (dbfs and s3) are supported. Only - // one destination can be specified for one cluster. If the conf is given, - // the logs will be delivered to the destination every `5 mins`. The - // destination of driver logs is `$destination/$clusterId/driver`, while the - // destination of executor logs is `$destination/$clusterId/executor`. + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. 
The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. @@ -5338,11 +5346,12 @@ type UpdateClusterResource struct { // specified at cluster creation, a set of default values will be used. AzureAttributes *AzureAttributes `json:"azure_attributes,omitempty"` // The configuration for delivering spark logs to a long-term storage - // destination. Two kinds of destinations (dbfs and s3) are supported. Only - // one destination can be specified for one cluster. If the conf is given, - // the logs will be delivered to the destination every `5 mins`. The - // destination of driver logs is `$destination/$clusterId/driver`, while the - // destination of executor logs is `$destination/$clusterId/executor`. + // destination. Three kinds of destinations (DBFS, S3 and Unity Catalog + // volumes) are supported. Only one destination can be specified for one + // cluster. If the conf is given, the logs will be delivered to the + // destination every `5 mins`. The destination of driver logs is + // `$destination/$clusterId/driver`, while the destination of executor logs + // is `$destination/$clusterId/executor`. ClusterLogConf *ClusterLogConf `json:"cluster_log_conf,omitempty"` // Cluster name requested by the user. This doesn't have to be unique. If // not specified at creation, the cluster name will be an empty string. @@ -5511,7 +5520,8 @@ type UpdateResponse struct { } type VolumesStorageInfo struct { - // Unity Catalog Volumes file destination, e.g. `/Volumes/my-init.sh` + // Unity Catalog volumes file destination, e.g. 
+ // `/Volumes/catalog/schema/volume/dir/file` Destination string `json:"destination"` } diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 11f706c95..bba3aa4ba 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -63,6 +63,18 @@ type GenieInterface interface { // `EXECUTING_QUERY`. GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieGetMessageQueryResultResponse, error) + // Get conversation message SQL query result by attachment id. + // + // Get the result of SQL query by attachment id This is only available if a + // message has a query attachment and the message status is `EXECUTING_QUERY`. + GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) + + // Get conversation message SQL query result by attachment id. + // + // Get the result of SQL query by attachment id This is only available if a + // message has a query attachment and the message status is `EXECUTING_QUERY`. + GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) + // Start conversation. // // Start a new conversation. @@ -225,6 +237,19 @@ func (a *GenieAPI) GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(c }) } +// Get conversation message SQL query result by attachment id. +// +// Get the result of SQL query by attachment id This is only available if a +// message has a query attachment and the message status is `EXECUTING_QUERY`. 
+func (a *GenieAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) { + return a.genieImpl.GetMessageQueryResultByAttachment(ctx, GenieGetQueryResultByAttachmentRequest{ + SpaceId: spaceId, + ConversationId: conversationId, + MessageId: messageId, + AttachmentId: attachmentId, + }) +} + // Start conversation. // // Start a new conversation. @@ -472,121 +497,16 @@ func (a *LakeviewAPI) GetSubscriptionByDashboardIdAndScheduleIdAndSubscriptionId }) } -// List dashboards. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LakeviewAPI) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { - - getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListDashboardsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.lakeviewImpl.List(ctx, req) - } - getItems := func(resp *ListDashboardsResponse) []Dashboard { - return resp.Dashboards - } - getNextReq := func(resp *ListDashboardsResponse) *ListDashboardsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List dashboards. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LakeviewAPI) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[Dashboard](ctx, iterator) -} - -// List dashboard schedules. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *LakeviewAPI) ListSchedules(ctx context.Context, request ListSchedulesRequest) listing.Iterator[Schedule] { - - getNextPage := func(ctx context.Context, req ListSchedulesRequest) (*ListSchedulesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.lakeviewImpl.ListSchedules(ctx, req) - } - getItems := func(resp *ListSchedulesResponse) []Schedule { - return resp.Schedules - } - getNextReq := func(resp *ListSchedulesResponse) *ListSchedulesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List dashboard schedules. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LakeviewAPI) ListSchedulesAll(ctx context.Context, request ListSchedulesRequest) ([]Schedule, error) { - iterator := a.ListSchedules(ctx, request) - return listing.ToSlice[Schedule](ctx, iterator) -} - // List dashboard schedules. func (a *LakeviewAPI) ListSchedulesByDashboardId(ctx context.Context, dashboardId string) (*ListSchedulesResponse, error) { - return a.lakeviewImpl.ListSchedules(ctx, ListSchedulesRequest{ + return a.lakeviewImpl.internalListSchedules(ctx, ListSchedulesRequest{ DashboardId: dashboardId, }) } -// List schedule subscriptions. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *LakeviewAPI) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) listing.Iterator[Subscription] { - - getNextPage := func(ctx context.Context, req ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.lakeviewImpl.ListSubscriptions(ctx, req) - } - getItems := func(resp *ListSubscriptionsResponse) []Subscription { - return resp.Subscriptions - } - getNextReq := func(resp *ListSubscriptionsResponse) *ListSubscriptionsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List schedule subscriptions. -// -// This method is generated by Databricks SDK Code Generator. -func (a *LakeviewAPI) ListSubscriptionsAll(ctx context.Context, request ListSubscriptionsRequest) ([]Subscription, error) { - iterator := a.ListSubscriptions(ctx, request) - return listing.ToSlice[Subscription](ctx, iterator) -} - // List schedule subscriptions. 
func (a *LakeviewAPI) ListSubscriptionsByDashboardIdAndScheduleId(ctx context.Context, dashboardId string, scheduleId string) (*ListSubscriptionsResponse, error) { - return a.lakeviewImpl.ListSubscriptions(ctx, ListSubscriptionsRequest{ + return a.lakeviewImpl.internalListSubscriptions(ctx, ListSubscriptionsRequest{ DashboardId: dashboardId, ScheduleId: scheduleId, }) diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index cd55850bc..0ea0af648 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just Genie API methods @@ -56,6 +58,16 @@ func (a *genieImpl) GetMessageQueryResult(ctx context.Context, request GenieGetM return &genieGetMessageQueryResultResponse, err } +func (a *genieImpl) GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) { + var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse + path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/query-result/%v", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieGetMessageQueryResultResponse) + return &genieGetMessageQueryResultResponse, err +} + func (a *genieImpl) StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) { var genieStartConversationResponse GenieStartConversationResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/start-conversation", request.SpaceId) @@ -165,7 +177,38 @@ func (a 
*lakeviewImpl) GetSubscription(ctx context.Context, request GetSubscript return &subscription, err } -func (a *lakeviewImpl) List(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) { +// List dashboards. +func (a *lakeviewImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { + + getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListDashboardsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListDashboardsResponse) []Dashboard { + return resp.Dashboards + } + getNextReq := func(resp *ListDashboardsResponse) *ListDashboardsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List dashboards. +func (a *lakeviewImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Dashboard](ctx, iterator) +} + +func (a *lakeviewImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListDashboardsResponse, error) { var listDashboardsResponse ListDashboardsResponse path := "/api/2.0/lakeview/dashboards" queryParams := make(map[string]any) @@ -175,7 +218,38 @@ func (a *lakeviewImpl) List(ctx context.Context, request ListDashboardsRequest) return &listDashboardsResponse, err } -func (a *lakeviewImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) (*ListSchedulesResponse, error) { +// List dashboard schedules. 
+func (a *lakeviewImpl) ListSchedules(ctx context.Context, request ListSchedulesRequest) listing.Iterator[Schedule] { + + getNextPage := func(ctx context.Context, req ListSchedulesRequest) (*ListSchedulesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSchedules(ctx, req) + } + getItems := func(resp *ListSchedulesResponse) []Schedule { + return resp.Schedules + } + getNextReq := func(resp *ListSchedulesResponse) *ListSchedulesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List dashboard schedules. +func (a *lakeviewImpl) ListSchedulesAll(ctx context.Context, request ListSchedulesRequest) ([]Schedule, error) { + iterator := a.ListSchedules(ctx, request) + return listing.ToSlice[Schedule](ctx, iterator) +} + +func (a *lakeviewImpl) internalListSchedules(ctx context.Context, request ListSchedulesRequest) (*ListSchedulesResponse, error) { var listSchedulesResponse ListSchedulesResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules", request.DashboardId) queryParams := make(map[string]any) @@ -185,7 +259,38 @@ func (a *lakeviewImpl) ListSchedules(ctx context.Context, request ListSchedulesR return &listSchedulesResponse, err } -func (a *lakeviewImpl) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { +// List schedule subscriptions. 
+func (a *lakeviewImpl) ListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) listing.Iterator[Subscription] { + + getNextPage := func(ctx context.Context, req ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSubscriptions(ctx, req) + } + getItems := func(resp *ListSubscriptionsResponse) []Subscription { + return resp.Subscriptions + } + getNextReq := func(resp *ListSubscriptionsResponse) *ListSubscriptionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List schedule subscriptions. +func (a *lakeviewImpl) ListSubscriptionsAll(ctx context.Context, request ListSubscriptionsRequest) ([]Subscription, error) { + iterator := a.ListSubscriptions(ctx, request) + return listing.ToSlice[Subscription](ctx, iterator) +} + +func (a *lakeviewImpl) internalListSubscriptions(ctx context.Context, request ListSubscriptionsRequest) (*ListSubscriptionsResponse, error) { var listSubscriptionsResponse ListSubscriptionsResponse path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/schedules/%v/subscriptions", request.DashboardId, request.ScheduleId) queryParams := make(map[string]any) diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index aeff5a93d..f5892cd60 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -37,6 +37,13 @@ type GenieService interface { // status is `EXECUTING_QUERY`. GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) + // Get conversation message SQL query result by attachment id. 
+ // + // Get the result of SQL query by attachment id This is only available if a + // message has a query attachment and the message status is + // `EXECUTING_QUERY`. + GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) + // Start conversation. // // Start a new conversation. diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 7c7fd809e..08f320ed5 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -362,6 +362,18 @@ type GenieGetMessageQueryResultResponse struct { StatementResponse *sql.StatementResponse `json:"statement_response,omitempty"` } +// Get conversation message SQL query result by attachment id +type GenieGetQueryResultByAttachmentRequest struct { + // Attachment ID + AttachmentId string `json:"-" url:"-"` + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Genie space ID + SpaceId string `json:"-" url:"-"` +} + type GenieMessage struct { // AI produced response to the message Attachments []GenieAttachment `json:"attachments,omitempty"` @@ -705,6 +717,8 @@ const MessageErrorTypeRetryableProcessingException MessageErrorType = `RETRYABLE const MessageErrorTypeSqlExecutionException MessageErrorType = `SQL_EXECUTION_EXCEPTION` +const MessageErrorTypeStopProcessDueToAutoRegenerate MessageErrorType = `STOP_PROCESS_DUE_TO_AUTO_REGENERATE` + const MessageErrorTypeTablesMissingException MessageErrorType = `TABLES_MISSING_EXCEPTION` const MessageErrorTypeTooManyCertifiedAnswersException MessageErrorType = `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION` @@ -727,11 +741,11 @@ func (f *MessageErrorType) String() string { // Set raw string value and validate it against allowed values func (f *MessageErrorType) Set(v string) error { switch v { - case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, 
`CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, 
`GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: *f = MessageErrorType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", 
"LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", 
"STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) } } diff --git a/service/files/api.go b/service/files/api.go index b57bd759f..027a369b1 100755 --- a/service/files/api.go +++ b/service/files/api.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" - "github.com/databricks/databricks-sdk-go/useragent" ) type DbfsInterface interface { @@ -223,59 +222,6 @@ func (a *DbfsAPI) GetStatusByPath(ctx context.Context, path string) (*FileInfo, }) } -// List directory contents or file details. -// -// List the contents of a directory, or details of the file. If the file or -// directory does not exist, this call throws an exception with -// `RESOURCE_DOES_NOT_EXIST`. -// -// When calling list on a large directory, the list operation will time out -// after approximately 60 seconds. We strongly recommend using list only on -// directories containing less than 10K files and discourage using the DBFS REST -// API for operations that list more than 10K files. Instead, we recommend that -// you perform such operations in the context of a cluster, using the [File -// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), -// which provides the same functionality without timing out. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *DbfsAPI) List(ctx context.Context, request ListDbfsRequest) listing.Iterator[FileInfo] { - - getNextPage := func(ctx context.Context, req ListDbfsRequest) (*ListStatusResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.dbfsImpl.List(ctx, req) - } - getItems := func(resp *ListStatusResponse) []FileInfo { - return resp.Files - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List directory contents or file details. -// -// List the contents of a directory, or details of the file. If the file or -// directory does not exist, this call throws an exception with -// `RESOURCE_DOES_NOT_EXIST`. -// -// When calling list on a large directory, the list operation will time out -// after approximately 60 seconds. We strongly recommend using list only on -// directories containing less than 10K files and discourage using the DBFS REST -// API for operations that list more than 10K files. Instead, we recommend that -// you perform such operations in the context of a cluster, using the [File -// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), -// which provides the same functionality without timing out. -// -// This method is generated by Databricks SDK Code Generator. -func (a *DbfsAPI) ListAll(ctx context.Context, request ListDbfsRequest) ([]FileInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[FileInfo](ctx, iterator) -} - // List directory contents or file details. // // List the contents of a directory, or details of the file. If the file or @@ -290,7 +236,7 @@ func (a *DbfsAPI) ListAll(ctx context.Context, request ListDbfsRequest) ([]FileI // system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), // which provides the same functionality without timing out. 
func (a *DbfsAPI) ListByPath(ctx context.Context, path string) (*ListStatusResponse, error) { - return a.dbfsImpl.List(ctx, ListDbfsRequest{ + return a.dbfsImpl.internalList(ctx, ListDbfsRequest{ Path: path, }) } @@ -523,54 +469,12 @@ func (a *FilesAPI) GetMetadataByFilePath(ctx context.Context, filePath string) ( }) } -// List directory contents. -// -// Returns the contents of a directory. If there is no directory at the -// specified path, the API returns a HTTP 404 error. -// -// This method is generated by Databricks SDK Code Generator. -func (a *FilesAPI) ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) listing.Iterator[DirectoryEntry] { - - getNextPage := func(ctx context.Context, req ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.filesImpl.ListDirectoryContents(ctx, req) - } - getItems := func(resp *ListDirectoryResponse) []DirectoryEntry { - return resp.Contents - } - getNextReq := func(resp *ListDirectoryResponse) *ListDirectoryContentsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List directory contents. -// -// Returns the contents of a directory. If there is no directory at the -// specified path, the API returns a HTTP 404 error. -// -// This method is generated by Databricks SDK Code Generator. -func (a *FilesAPI) ListDirectoryContentsAll(ctx context.Context, request ListDirectoryContentsRequest) ([]DirectoryEntry, error) { - iterator := a.ListDirectoryContents(ctx, request) - return listing.ToSliceN[DirectoryEntry, int64](ctx, iterator, request.PageSize) - -} - // List directory contents. // // Returns the contents of a directory. If there is no directory at the // specified path, the API returns a HTTP 404 error. 
func (a *FilesAPI) ListDirectoryContentsByDirectoryPath(ctx context.Context, directoryPath string) (*ListDirectoryResponse, error) { - return a.filesImpl.ListDirectoryContents(ctx, ListDirectoryContentsRequest{ + return a.filesImpl.internalListDirectoryContents(ctx, ListDirectoryContentsRequest{ DirectoryPath: directoryPath, }) } diff --git a/service/files/impl.go b/service/files/impl.go index e3f6930bc..84a9001e6 100755 --- a/service/files/impl.go +++ b/service/files/impl.go @@ -9,6 +9,8 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" "golang.org/x/exp/slices" ) @@ -71,7 +73,56 @@ func (a *dbfsImpl) GetStatus(ctx context.Context, request GetStatusRequest) (*Fi return &fileInfo, err } -func (a *dbfsImpl) List(ctx context.Context, request ListDbfsRequest) (*ListStatusResponse, error) { +// List directory contents or file details. +// +// List the contents of a directory, or details of the file. If the file or +// directory does not exist, this call throws an exception with +// `RESOURCE_DOES_NOT_EXIST`. +// +// When calling list on a large directory, the list operation will time out +// after approximately 60 seconds. We strongly recommend using list only on +// directories containing less than 10K files and discourage using the DBFS REST +// API for operations that list more than 10K files. Instead, we recommend that +// you perform such operations in the context of a cluster, using the [File +// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), +// which provides the same functionality without timing out. 
+func (a *dbfsImpl) List(ctx context.Context, request ListDbfsRequest) listing.Iterator[FileInfo] { + + getNextPage := func(ctx context.Context, req ListDbfsRequest) (*ListStatusResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListStatusResponse) []FileInfo { + return resp.Files + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List directory contents or file details. +// +// List the contents of a directory, or details of the file. If the file or +// directory does not exist, this call throws an exception with +// `RESOURCE_DOES_NOT_EXIST`. +// +// When calling list on a large directory, the list operation will time out +// after approximately 60 seconds. We strongly recommend using list only on +// directories containing less than 10K files and discourage using the DBFS REST +// API for operations that list more than 10K files. Instead, we recommend that +// you perform such operations in the context of a cluster, using the [File +// system utility (dbutils.fs)](/dev-tools/databricks-utils.html#dbutils-fs), +// which provides the same functionality without timing out. +func (a *dbfsImpl) ListAll(ctx context.Context, request ListDbfsRequest) ([]FileInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FileInfo](ctx, iterator) +} + +func (a *dbfsImpl) internalList(ctx context.Context, request ListDbfsRequest) (*ListStatusResponse, error) { var listStatusResponse ListStatusResponse path := "/api/2.0/dbfs/list" queryParams := make(map[string]any) @@ -184,7 +235,45 @@ func (a *filesImpl) GetMetadata(ctx context.Context, request GetMetadataRequest) return &getMetadataResponse, err } -func (a *filesImpl) ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { +// List directory contents. +// +// Returns the contents of a directory. 
If there is no directory at the +// specified path, the API returns a HTTP 404 error. +func (a *filesImpl) ListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) listing.Iterator[DirectoryEntry] { + + getNextPage := func(ctx context.Context, req ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListDirectoryContents(ctx, req) + } + getItems := func(resp *ListDirectoryResponse) []DirectoryEntry { + return resp.Contents + } + getNextReq := func(resp *ListDirectoryResponse) *ListDirectoryContentsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List directory contents. +// +// Returns the contents of a directory. If there is no directory at the +// specified path, the API returns a HTTP 404 error. +func (a *filesImpl) ListDirectoryContentsAll(ctx context.Context, request ListDirectoryContentsRequest) ([]DirectoryEntry, error) { + iterator := a.ListDirectoryContents(ctx, request) + return listing.ToSliceN[DirectoryEntry, int64](ctx, iterator, request.PageSize) + +} + +func (a *filesImpl) internalListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { var listDirectoryResponse ListDirectoryResponse path := fmt.Sprintf("/api/2.0/fs/directories%v", httpclient.EncodeMultiSegmentPathParameter(request.DirectoryPath)) queryParams := make(map[string]any) diff --git a/service/iam/api.go b/service/iam/api.go index 008607f65..a10128923 100755 --- a/service/iam/api.go +++ b/service/iam/api.go @@ -218,55 +218,6 @@ func (a *AccountGroupsAPI) GetById(ctx context.Context, id string) (*Group, erro }) } -// List group details. -// -// Gets all details of the groups associated with the Databricks account. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountGroupsAPI) List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[Group] { - - request.StartIndex = 1 // SCIM offset starts from 1 - if request.Count == 0 { - request.Count = 100 - } - getNextPage := func(ctx context.Context, req ListAccountGroupsRequest) (*ListGroupsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountGroupsImpl.List(ctx, req) - } - getItems := func(resp *ListGroupsResponse) []Group { - return resp.Resources - } - getNextReq := func(resp *ListGroupsResponse) *ListAccountGroupsRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[Group, string]( - iterator, - func(item Group) string { - return item.Id - }) - return dedupedIterator -} - -// List group details. -// -// Gets all details of the groups associated with the Databricks account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountGroupsAPI) ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]Group, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) - -} - // GroupDisplayNameToIdMap calls [AccountGroupsAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. // // Returns an error if there's more than one [Group] with the same .DisplayName. @@ -432,55 +383,6 @@ func (a *AccountServicePrincipalsAPI) GetById(ctx context.Context, id string) (* }) } -// List service principals. -// -// Gets the set of service principals associated with a Databricks account. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *AccountServicePrincipalsAPI) List(ctx context.Context, request ListAccountServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { - - request.StartIndex = 1 // SCIM offset starts from 1 - if request.Count == 0 { - request.Count = 100 - } - getNextPage := func(ctx context.Context, req ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountServicePrincipalsImpl.List(ctx, req) - } - getItems := func(resp *ListServicePrincipalResponse) []ServicePrincipal { - return resp.Resources - } - getNextReq := func(resp *ListServicePrincipalResponse) *ListAccountServicePrincipalsRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[ServicePrincipal, string]( - iterator, - func(item ServicePrincipal) string { - return item.Id - }) - return dedupedIterator -} - -// List service principals. -// -// Gets the set of service principals associated with a Databricks account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountServicePrincipalsAPI) ListAll(ctx context.Context, request ListAccountServicePrincipalsRequest) ([]ServicePrincipal, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) - -} - // ServicePrincipalDisplayNameToIdMap calls [AccountServicePrincipalsAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. @@ -650,55 +552,6 @@ func (a *AccountUsersAPI) GetById(ctx context.Context, id string) (*User, error) }) } -// List users. 
-// -// Gets details for all the users associated with a Databricks account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountUsersAPI) List(ctx context.Context, request ListAccountUsersRequest) listing.Iterator[User] { - - request.StartIndex = 1 // SCIM offset starts from 1 - if request.Count == 0 { - request.Count = 100 - } - getNextPage := func(ctx context.Context, req ListAccountUsersRequest) (*ListUsersResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountUsersImpl.List(ctx, req) - } - getItems := func(resp *ListUsersResponse) []User { - return resp.Resources - } - getNextReq := func(resp *ListUsersResponse) *ListAccountUsersRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[User, string]( - iterator, - func(item User) string { - return item.Id - }) - return dedupedIterator -} - -// List users. -// -// Gets details for all the users associated with a Databricks account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountUsersAPI) ListAll(ctx context.Context, request ListAccountUsersRequest) ([]User, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[User, int64](ctx, iterator, request.Count) - -} - // UserUserNameToIdMap calls [AccountUsersAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. // // Returns an error if there's more than one [User] with the same .UserName. @@ -882,55 +735,6 @@ func (a *GroupsAPI) GetById(ctx context.Context, id string) (*Group, error) { }) } -// List group details. -// -// Gets all details of the groups associated with the Databricks workspace. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *GroupsAPI) List(ctx context.Context, request ListGroupsRequest) listing.Iterator[Group] { - - request.StartIndex = 1 // SCIM offset starts from 1 - if request.Count == 0 { - request.Count = 100 - } - getNextPage := func(ctx context.Context, req ListGroupsRequest) (*ListGroupsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.groupsImpl.List(ctx, req) - } - getItems := func(resp *ListGroupsResponse) []Group { - return resp.Resources - } - getNextReq := func(resp *ListGroupsResponse) *ListGroupsRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[Group, string]( - iterator, - func(item Group) string { - return item.Id - }) - return dedupedIterator -} - -// List group details. -// -// Gets all details of the groups associated with the Databricks workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *GroupsAPI) ListAll(ctx context.Context, request ListGroupsRequest) ([]Group, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) - -} - // GroupDisplayNameToIdMap calls [GroupsAPI.ListAll] and creates a map of results with [Group].DisplayName as key and [Group].Id as value. // // Returns an error if there's more than one [Group] with the same .DisplayName. @@ -1241,55 +1045,6 @@ func (a *ServicePrincipalsAPI) GetById(ctx context.Context, id string) (*Service }) } -// List service principals. -// -// Gets the set of service principals associated with a Databricks workspace. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ServicePrincipalsAPI) List(ctx context.Context, request ListServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { - - request.StartIndex = 1 // SCIM offset starts from 1 - if request.Count == 0 { - request.Count = 100 - } - getNextPage := func(ctx context.Context, req ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.servicePrincipalsImpl.List(ctx, req) - } - getItems := func(resp *ListServicePrincipalResponse) []ServicePrincipal { - return resp.Resources - } - getNextReq := func(resp *ListServicePrincipalResponse) *ListServicePrincipalsRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[ServicePrincipal, string]( - iterator, - func(item ServicePrincipal) string { - return item.Id - }) - return dedupedIterator -} - -// List service principals. -// -// Gets the set of service principals associated with a Databricks workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ServicePrincipalsAPI) ListAll(ctx context.Context, request ListServicePrincipalsRequest) ([]ServicePrincipal, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) - -} - // ServicePrincipalDisplayNameToIdMap calls [ServicePrincipalsAPI.ListAll] and creates a map of results with [ServicePrincipal].DisplayName as key and [ServicePrincipal].Id as value. // // Returns an error if there's more than one [ServicePrincipal] with the same .DisplayName. @@ -1483,55 +1238,6 @@ func (a *UsersAPI) GetById(ctx context.Context, id string) (*User, error) { }) } -// List users. -// -// Gets details for all the users associated with a Databricks workspace. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *UsersAPI) List(ctx context.Context, request ListUsersRequest) listing.Iterator[User] { - - request.StartIndex = 1 // SCIM offset starts from 1 - if request.Count == 0 { - request.Count = 100 - } - getNextPage := func(ctx context.Context, req ListUsersRequest) (*ListUsersResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.usersImpl.List(ctx, req) - } - getItems := func(resp *ListUsersResponse) []User { - return resp.Resources - } - getNextReq := func(resp *ListUsersResponse) *ListUsersRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[User, string]( - iterator, - func(item User) string { - return item.Id - }) - return dedupedIterator -} - -// List users. -// -// Gets details for all the users associated with a Databricks workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *UsersAPI) ListAll(ctx context.Context, request ListUsersRequest) ([]User, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[User, int64](ctx, iterator, request.Count) - -} - // UserUserNameToIdMap calls [UsersAPI.ListAll] and creates a map of results with [User].UserName as key and [User].Id as value. // // Returns an error if there's more than one [User] with the same .UserName. @@ -1675,47 +1381,12 @@ func (a *WorkspaceAssignmentAPI) GetByWorkspaceId(ctx context.Context, workspace }) } -// Get permission assignments. -// -// Get the permission assignments for the specified Databricks account and -// Databricks workspace. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *WorkspaceAssignmentAPI) List(ctx context.Context, request ListWorkspaceAssignmentRequest) listing.Iterator[PermissionAssignment] { - - getNextPage := func(ctx context.Context, req ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.workspaceAssignmentImpl.List(ctx, req) - } - getItems := func(resp *PermissionAssignments) []PermissionAssignment { - return resp.PermissionAssignments - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get permission assignments. -// -// Get the permission assignments for the specified Databricks account and -// Databricks workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *WorkspaceAssignmentAPI) ListAll(ctx context.Context, request ListWorkspaceAssignmentRequest) ([]PermissionAssignment, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[PermissionAssignment](ctx, iterator) -} - // Get permission assignments. // // Get the permission assignments for the specified Databricks account and // Databricks workspace. 
func (a *WorkspaceAssignmentAPI) ListByWorkspaceId(ctx context.Context, workspaceId int64) (*PermissionAssignments, error) { - return a.workspaceAssignmentImpl.List(ctx, ListWorkspaceAssignmentRequest{ + return a.workspaceAssignmentImpl.internalList(ctx, ListWorkspaceAssignmentRequest{ WorkspaceId: workspaceId, }) } diff --git a/service/iam/impl.go b/service/iam/impl.go index 8e9f360d3..f8a118799 100755 --- a/service/iam/impl.go +++ b/service/iam/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just AccessControl API methods @@ -132,7 +134,52 @@ func (a *accountGroupsImpl) Get(ctx context.Context, request GetAccountGroupRequ return &group, err } -func (a *accountGroupsImpl) List(ctx context.Context, request ListAccountGroupsRequest) (*ListGroupsResponse, error) { +// List group details. +// +// Gets all details of the groups associated with the Databricks account. 
+func (a *accountGroupsImpl) List(ctx context.Context, request ListAccountGroupsRequest) listing.Iterator[Group] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListAccountGroupsRequest) (*ListGroupsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListGroupsResponse) []Group { + return resp.Resources + } + getNextReq := func(resp *ListGroupsResponse) *ListAccountGroupsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[Group, string]( + iterator, + func(item Group) string { + return item.Id + }) + return dedupedIterator +} + +// List group details. +// +// Gets all details of the groups associated with the Databricks account. +func (a *accountGroupsImpl) ListAll(ctx context.Context, request ListAccountGroupsRequest) ([]Group, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) + +} + +func (a *accountGroupsImpl) internalList(ctx context.Context, request ListAccountGroupsRequest) (*ListGroupsResponse, error) { var listGroupsResponse ListGroupsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -199,7 +246,52 @@ func (a *accountServicePrincipalsImpl) Get(ctx context.Context, request GetAccou return &servicePrincipal, err } -func (a *accountServicePrincipalsImpl) List(ctx context.Context, request ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { +// List service principals. +// +// Gets the set of service principals associated with a Databricks account. 
+func (a *accountServicePrincipalsImpl) List(ctx context.Context, request ListAccountServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListServicePrincipalResponse) []ServicePrincipal { + return resp.Resources + } + getNextReq := func(resp *ListServicePrincipalResponse) *ListAccountServicePrincipalsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[ServicePrincipal, string]( + iterator, + func(item ServicePrincipal) string { + return item.Id + }) + return dedupedIterator +} + +// List service principals. +// +// Gets the set of service principals associated with a Databricks account. 
+func (a *accountServicePrincipalsImpl) ListAll(ctx context.Context, request ListAccountServicePrincipalsRequest) ([]ServicePrincipal, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) + +} + +func (a *accountServicePrincipalsImpl) internalList(ctx context.Context, request ListAccountServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { var listServicePrincipalResponse ListServicePrincipalResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -266,7 +358,52 @@ func (a *accountUsersImpl) Get(ctx context.Context, request GetAccountUserReques return &user, err } -func (a *accountUsersImpl) List(ctx context.Context, request ListAccountUsersRequest) (*ListUsersResponse, error) { +// List users. +// +// Gets details for all the users associated with a Databricks account. +func (a *accountUsersImpl) List(ctx context.Context, request ListAccountUsersRequest) listing.Iterator[User] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListAccountUsersRequest) (*ListUsersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListUsersResponse) []User { + return resp.Resources + } + getNextReq := func(resp *ListUsersResponse) *ListAccountUsersRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[User, string]( + iterator, + func(item User) string { + return item.Id + }) + return dedupedIterator +} + +// List users. 
+// +// Gets details for all the users associated with a Databricks account. +func (a *accountUsersImpl) ListAll(ctx context.Context, request ListAccountUsersRequest) ([]User, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[User, int64](ctx, iterator, request.Count) + +} + +func (a *accountUsersImpl) internalList(ctx context.Context, request ListAccountUsersRequest) (*ListUsersResponse, error) { var listUsersResponse ListUsersResponse path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -348,7 +485,52 @@ func (a *groupsImpl) Get(ctx context.Context, request GetGroupRequest) (*Group, return &group, err } -func (a *groupsImpl) List(ctx context.Context, request ListGroupsRequest) (*ListGroupsResponse, error) { +// List group details. +// +// Gets all details of the groups associated with the Databricks workspace. +func (a *groupsImpl) List(ctx context.Context, request ListGroupsRequest) listing.Iterator[Group] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListGroupsRequest) (*ListGroupsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListGroupsResponse) []Group { + return resp.Resources + } + getNextReq := func(resp *ListGroupsResponse) *ListGroupsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[Group, string]( + iterator, + func(item Group) string { + return item.Id + }) + return dedupedIterator +} + +// List group details. +// +// Gets all details of the groups associated with the Databricks workspace. 
+func (a *groupsImpl) ListAll(ctx context.Context, request ListGroupsRequest) ([]Group, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[Group, int64](ctx, iterator, request.Count) + +} + +func (a *groupsImpl) internalList(ctx context.Context, request ListGroupsRequest) (*ListGroupsResponse, error) { var listGroupsResponse ListGroupsResponse path := "/api/2.0/preview/scim/v2/Groups" queryParams := make(map[string]any) @@ -478,7 +660,52 @@ func (a *servicePrincipalsImpl) Get(ctx context.Context, request GetServicePrinc return &servicePrincipal, err } -func (a *servicePrincipalsImpl) List(ctx context.Context, request ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { +// List service principals. +// +// Gets the set of service principals associated with a Databricks workspace. +func (a *servicePrincipalsImpl) List(ctx context.Context, request ListServicePrincipalsRequest) listing.Iterator[ServicePrincipal] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListServicePrincipalResponse) []ServicePrincipal { + return resp.Resources + } + getNextReq := func(resp *ListServicePrincipalResponse) *ListServicePrincipalsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[ServicePrincipal, string]( + iterator, + func(item ServicePrincipal) string { + return item.Id + }) + return dedupedIterator +} + +// List service principals. +// +// Gets the set of service principals associated with a Databricks workspace. 
+func (a *servicePrincipalsImpl) ListAll(ctx context.Context, request ListServicePrincipalsRequest) ([]ServicePrincipal, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[ServicePrincipal, int64](ctx, iterator, request.Count) + +} + +func (a *servicePrincipalsImpl) internalList(ctx context.Context, request ListServicePrincipalsRequest) (*ListServicePrincipalResponse, error) { var listServicePrincipalResponse ListServicePrincipalResponse path := "/api/2.0/preview/scim/v2/ServicePrincipals" queryParams := make(map[string]any) @@ -565,7 +792,52 @@ func (a *usersImpl) GetPermissions(ctx context.Context) (*PasswordPermissions, e return &passwordPermissions, err } -func (a *usersImpl) List(ctx context.Context, request ListUsersRequest) (*ListUsersResponse, error) { +// List users. +// +// Gets details for all the users associated with a Databricks workspace. +func (a *usersImpl) List(ctx context.Context, request ListUsersRequest) listing.Iterator[User] { + + request.StartIndex = 1 // SCIM offset starts from 1 + if request.Count == 0 { + request.Count = 100 + } + getNextPage := func(ctx context.Context, req ListUsersRequest) (*ListUsersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListUsersResponse) []User { + return resp.Resources + } + getNextReq := func(resp *ListUsersResponse) *ListUsersRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.StartIndex = resp.StartIndex + int64(len(resp.Resources)) + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[User, string]( + iterator, + func(item User) string { + return item.Id + }) + return dedupedIterator +} + +// List users. +// +// Gets details for all the users associated with a Databricks workspace. 
+func (a *usersImpl) ListAll(ctx context.Context, request ListUsersRequest) ([]User, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[User, int64](ctx, iterator, request.Count) + +} + +func (a *usersImpl) internalList(ctx context.Context, request ListUsersRequest) (*ListUsersResponse, error) { var listUsersResponse ListUsersResponse path := "/api/2.0/preview/scim/v2/Users" queryParams := make(map[string]any) @@ -644,7 +916,38 @@ func (a *workspaceAssignmentImpl) Get(ctx context.Context, request GetWorkspaceA return &workspacePermissions, err } -func (a *workspaceAssignmentImpl) List(ctx context.Context, request ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { +// Get permission assignments. +// +// Get the permission assignments for the specified Databricks account and +// Databricks workspace. +func (a *workspaceAssignmentImpl) List(ctx context.Context, request ListWorkspaceAssignmentRequest) listing.Iterator[PermissionAssignment] { + + getNextPage := func(ctx context.Context, req ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *PermissionAssignments) []PermissionAssignment { + return resp.PermissionAssignments + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get permission assignments. +// +// Get the permission assignments for the specified Databricks account and +// Databricks workspace. 
+func (a *workspaceAssignmentImpl) ListAll(ctx context.Context, request ListWorkspaceAssignmentRequest) ([]PermissionAssignment, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PermissionAssignment](ctx, iterator) +} + +func (a *workspaceAssignmentImpl) internalList(ctx context.Context, request ListWorkspaceAssignmentRequest) (*PermissionAssignments, error) { var permissionAssignments PermissionAssignments path := fmt.Sprintf("/api/2.0/accounts/%v/workspaces/%v/permissionassignments", a.client.ConfiguredAccountID(), request.WorkspaceId) queryParams := make(map[string]any) diff --git a/service/jobs/api.go b/service/jobs/api.go index fb4f0ef56..bbb59f854 100755 --- a/service/jobs/api.go +++ b/service/jobs/api.go @@ -497,45 +497,6 @@ func (a *JobsAPI) GetRunOutputByRunId(ctx context.Context, runId int64) (*RunOut }) } -// List jobs. -// -// Retrieves a list of jobs. -// -// This method is generated by Databricks SDK Code Generator. -func (a *JobsAPI) List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] { - - getNextPage := func(ctx context.Context, req ListJobsRequest) (*ListJobsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.jobsImpl.List(ctx, req) - } - getItems := func(resp *ListJobsResponse) []BaseJob { - return resp.Jobs - } - getNextReq := func(resp *ListJobsResponse) *ListJobsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List jobs. -// -// Retrieves a list of jobs. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *JobsAPI) ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[BaseJob](ctx, iterator) -} - // BaseJobSettingsNameToJobIdMap calls [JobsAPI.ListAll] and creates a map of results with [BaseJob].Settings.Name as key and [BaseJob].JobId as value. // // Returns an error if there's more than one [BaseJob] with the same .Settings.Name. @@ -589,45 +550,6 @@ func (a *JobsAPI) GetBySettingsName(ctx context.Context, name string) (*BaseJob, return &alternatives[0], nil } -// List job runs. -// -// List runs in descending order by start time. -// -// This method is generated by Databricks SDK Code Generator. -func (a *JobsAPI) ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] { - - getNextPage := func(ctx context.Context, req ListRunsRequest) (*ListRunsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.jobsImpl.ListRuns(ctx, req) - } - getItems := func(resp *ListRunsResponse) []BaseRun { - return resp.Runs - } - getNextReq := func(resp *ListRunsResponse) *ListRunsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List job runs. -// -// List runs in descending order by start time. -// -// This method is generated by Databricks SDK Code Generator. -func (a *JobsAPI) ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error) { - iterator := a.ListRuns(ctx, request) - return listing.ToSlice[BaseRun](ctx, iterator) -} - // Repair a job run. // // Re-run one or more tasks. Tasks are re-run as part of the original job run. @@ -854,48 +776,3 @@ func (a *PolicyComplianceForJobsAPI) GetComplianceByJobId(ctx context.Context, j JobId: jobId, }) } - -// List job policy compliance. 
-// -// Returns the policy compliance status of all jobs that use a given policy. -// Jobs could be out of compliance if a cluster policy they use was updated -// after the job was last edited and its job clusters no longer comply with the -// updated policy. -// -// This method is generated by Databricks SDK Code Generator. -func (a *PolicyComplianceForJobsAPI) ListCompliance(ctx context.Context, request ListJobComplianceRequest) listing.Iterator[JobCompliance] { - - getNextPage := func(ctx context.Context, req ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.policyComplianceForJobsImpl.ListCompliance(ctx, req) - } - getItems := func(resp *ListJobComplianceForPolicyResponse) []JobCompliance { - return resp.Jobs - } - getNextReq := func(resp *ListJobComplianceForPolicyResponse) *ListJobComplianceRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List job policy compliance. -// -// Returns the policy compliance status of all jobs that use a given policy. -// Jobs could be out of compliance if a cluster policy they use was updated -// after the job was last edited and its job clusters no longer comply with the -// updated policy. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *PolicyComplianceForJobsAPI) ListComplianceAll(ctx context.Context, request ListJobComplianceRequest) ([]JobCompliance, error) { - iterator := a.ListCompliance(ctx, request) - return listing.ToSlice[JobCompliance](ctx, iterator) -} diff --git a/service/jobs/impl.go b/service/jobs/impl.go index d381d8876..53a9816a5 100755 --- a/service/jobs/impl.go +++ b/service/jobs/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just Jobs API methods @@ -130,7 +132,42 @@ func (a *jobsImpl) GetRunOutput(ctx context.Context, request GetRunOutputRequest return &runOutput, err } -func (a *jobsImpl) List(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) { +// List jobs. +// +// Retrieves a list of jobs. +func (a *jobsImpl) List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] { + + getNextPage := func(ctx context.Context, req ListJobsRequest) (*ListJobsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListJobsResponse) []BaseJob { + return resp.Jobs + } + getNextReq := func(resp *ListJobsResponse) *ListJobsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List jobs. +// +// Retrieves a list of jobs. 
+func (a *jobsImpl) ListAll(ctx context.Context, request ListJobsRequest) ([]BaseJob, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[BaseJob](ctx, iterator) +} + +func (a *jobsImpl) internalList(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) { var listJobsResponse ListJobsResponse path := "/api/2.1/jobs/list" queryParams := make(map[string]any) @@ -140,7 +177,42 @@ func (a *jobsImpl) List(ctx context.Context, request ListJobsRequest) (*ListJobs return &listJobsResponse, err } -func (a *jobsImpl) ListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) { +// List job runs. +// +// List runs in descending order by start time. +func (a *jobsImpl) ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] { + + getNextPage := func(ctx context.Context, req ListRunsRequest) (*ListRunsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListRuns(ctx, req) + } + getItems := func(resp *ListRunsResponse) []BaseRun { + return resp.Runs + } + getNextReq := func(resp *ListRunsResponse) *ListRunsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List job runs. +// +// List runs in descending order by start time. 
+func (a *jobsImpl) ListRunsAll(ctx context.Context, request ListRunsRequest) ([]BaseRun, error) { + iterator := a.ListRuns(ctx, request) + return listing.ToSlice[BaseRun](ctx, iterator) +} + +func (a *jobsImpl) internalListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) { var listRunsResponse ListRunsResponse path := "/api/2.1/jobs/runs/list" queryParams := make(map[string]any) @@ -253,7 +325,48 @@ func (a *policyComplianceForJobsImpl) GetCompliance(ctx context.Context, request return &getPolicyComplianceResponse, err } -func (a *policyComplianceForJobsImpl) ListCompliance(ctx context.Context, request ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { +// List job policy compliance. +// +// Returns the policy compliance status of all jobs that use a given policy. +// Jobs could be out of compliance if a cluster policy they use was updated +// after the job was last edited and its job clusters no longer comply with the +// updated policy. +func (a *policyComplianceForJobsImpl) ListCompliance(ctx context.Context, request ListJobComplianceRequest) listing.Iterator[JobCompliance] { + + getNextPage := func(ctx context.Context, req ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListCompliance(ctx, req) + } + getItems := func(resp *ListJobComplianceForPolicyResponse) []JobCompliance { + return resp.Jobs + } + getNextReq := func(resp *ListJobComplianceForPolicyResponse) *ListJobComplianceRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List job policy compliance. +// +// Returns the policy compliance status of all jobs that use a given policy. 
+// Jobs could be out of compliance if a cluster policy they use was updated +// after the job was last edited and its job clusters no longer comply with the +// updated policy. +func (a *policyComplianceForJobsImpl) ListComplianceAll(ctx context.Context, request ListJobComplianceRequest) ([]JobCompliance, error) { + iterator := a.ListCompliance(ctx, request) + return listing.ToSlice[JobCompliance](ctx, iterator) +} + +func (a *policyComplianceForJobsImpl) internalListCompliance(ctx context.Context, request ListJobComplianceRequest) (*ListJobComplianceForPolicyResponse, error) { var listJobComplianceForPolicyResponse ListJobComplianceForPolicyResponse path := "/api/2.0/policies/jobs/list-compliance" queryParams := make(map[string]any) diff --git a/service/marketplace/api.go b/service/marketplace/api.go index 7a2009738..3bd903bae 100755 --- a/service/marketplace/api.go +++ b/service/marketplace/api.go @@ -78,101 +78,15 @@ type ConsumerFulfillmentsAPI struct { consumerFulfillmentsImpl } -// Get listing content metadata. -// -// Get a high level preview of the metadata of listing installable content. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ConsumerFulfillmentsAPI) Get(ctx context.Context, request GetListingContentMetadataRequest) listing.Iterator[SharedDataObject] { - - getNextPage := func(ctx context.Context, req GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerFulfillmentsImpl.Get(ctx, req) - } - getItems := func(resp *GetListingContentMetadataResponse) []SharedDataObject { - return resp.SharedDataObjects - } - getNextReq := func(resp *GetListingContentMetadataResponse) *GetListingContentMetadataRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get listing content metadata. -// -// Get a high level preview of the metadata of listing installable content. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerFulfillmentsAPI) GetAll(ctx context.Context, request GetListingContentMetadataRequest) ([]SharedDataObject, error) { - iterator := a.Get(ctx, request) - return listing.ToSlice[SharedDataObject](ctx, iterator) -} - // Get listing content metadata. // // Get a high level preview of the metadata of listing installable content. func (a *ConsumerFulfillmentsAPI) GetByListingId(ctx context.Context, listingId string) (*GetListingContentMetadataResponse, error) { - return a.consumerFulfillmentsImpl.Get(ctx, GetListingContentMetadataRequest{ + return a.consumerFulfillmentsImpl.internalGet(ctx, GetListingContentMetadataRequest{ ListingId: listingId, }) } -// List all listing fulfillments. -// -// Get all listings fulfillments associated with a listing. A _fulfillment_ is a -// potential installation. Standard installations contain metadata about the -// attached share or git repo. Only one of these fields will be present. 
-// Personalized installations contain metadata about the attached share or git -// repo, as well as the Delta Sharing recipient type. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerFulfillmentsAPI) List(ctx context.Context, request ListFulfillmentsRequest) listing.Iterator[ListingFulfillment] { - - getNextPage := func(ctx context.Context, req ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerFulfillmentsImpl.List(ctx, req) - } - getItems := func(resp *ListFulfillmentsResponse) []ListingFulfillment { - return resp.Fulfillments - } - getNextReq := func(resp *ListFulfillmentsResponse) *ListFulfillmentsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List all listing fulfillments. -// -// Get all listings fulfillments associated with a listing. A _fulfillment_ is a -// potential installation. Standard installations contain metadata about the -// attached share or git repo. Only one of these fields will be present. -// Personalized installations contain metadata about the attached share or git -// repo, as well as the Delta Sharing recipient type. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerFulfillmentsAPI) ListAll(ctx context.Context, request ListFulfillmentsRequest) ([]ListingFulfillment, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ListingFulfillment](ctx, iterator) -} - // List all listing fulfillments. // // Get all listings fulfillments associated with a listing. 
A _fulfillment_ is a @@ -181,7 +95,7 @@ func (a *ConsumerFulfillmentsAPI) ListAll(ctx context.Context, request ListFulfi // Personalized installations contain metadata about the attached share or git // repo, as well as the Delta Sharing recipient type. func (a *ConsumerFulfillmentsAPI) ListByListingId(ctx context.Context, listingId string) (*ListFulfillmentsResponse, error) { - return a.consumerFulfillmentsImpl.List(ctx, ListFulfillmentsRequest{ + return a.consumerFulfillmentsImpl.internalList(ctx, ListFulfillmentsRequest{ ListingId: listingId, }) } @@ -270,89 +184,11 @@ func (a *ConsumerInstallationsAPI) DeleteByListingIdAndInstallationId(ctx contex }) } -// List all installations. -// -// List all installations across all listings. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerInstallationsAPI) List(ctx context.Context, request ListAllInstallationsRequest) listing.Iterator[InstallationDetail] { - - getNextPage := func(ctx context.Context, req ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerInstallationsImpl.List(ctx, req) - } - getItems := func(resp *ListAllInstallationsResponse) []InstallationDetail { - return resp.Installations - } - getNextReq := func(resp *ListAllInstallationsResponse) *ListAllInstallationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List all installations. -// -// List all installations across all listings. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ConsumerInstallationsAPI) ListAll(ctx context.Context, request ListAllInstallationsRequest) ([]InstallationDetail, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[InstallationDetail](ctx, iterator) -} - -// List installations for a listing. -// -// List all installations for a particular listing. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerInstallationsAPI) ListListingInstallations(ctx context.Context, request ListInstallationsRequest) listing.Iterator[InstallationDetail] { - - getNextPage := func(ctx context.Context, req ListInstallationsRequest) (*ListInstallationsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerInstallationsImpl.ListListingInstallations(ctx, req) - } - getItems := func(resp *ListInstallationsResponse) []InstallationDetail { - return resp.Installations - } - getNextReq := func(resp *ListInstallationsResponse) *ListInstallationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List installations for a listing. -// -// List all installations for a particular listing. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerInstallationsAPI) ListListingInstallationsAll(ctx context.Context, request ListInstallationsRequest) ([]InstallationDetail, error) { - iterator := a.ListListingInstallations(ctx, request) - return listing.ToSlice[InstallationDetail](ctx, iterator) -} - // List installations for a listing. // // List all installations for a particular listing. 
func (a *ConsumerInstallationsAPI) ListListingInstallationsByListingId(ctx context.Context, listingId string) (*ListInstallationsResponse, error) { - return a.consumerInstallationsImpl.ListListingInstallations(ctx, ListInstallationsRequest{ + return a.consumerInstallationsImpl.internalListListingInstallations(ctx, ListInstallationsRequest{ ListingId: listingId, }) } @@ -454,47 +290,6 @@ func (a *ConsumerListingsAPI) GetById(ctx context.Context, id string) (*GetListi }) } -// List listings. -// -// List all published listings in the Databricks Marketplace that the consumer -// has access to. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerListingsAPI) List(ctx context.Context, request ListListingsRequest) listing.Iterator[Listing] { - - getNextPage := func(ctx context.Context, req ListListingsRequest) (*ListListingsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerListingsImpl.List(ctx, req) - } - getItems := func(resp *ListListingsResponse) []Listing { - return resp.Listings - } - getNextReq := func(resp *ListListingsResponse) *ListListingsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List listings. -// -// List all published listings in the Databricks Marketplace that the consumer -// has access to. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerListingsAPI) ListAll(ctx context.Context, request ListListingsRequest) ([]Listing, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[Listing](ctx, iterator) -} - // ListingSummaryNameToIdMap calls [ConsumerListingsAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. 
// // Returns an error if there's more than one [Listing] with the same .Summary.Name. @@ -548,49 +343,6 @@ func (a *ConsumerListingsAPI) GetBySummaryName(ctx context.Context, name string) return &alternatives[0], nil } -// Search listings. -// -// Search published listings in the Databricks Marketplace that the consumer has -// access to. This query supports a variety of different search parameters and -// performs fuzzy matching. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerListingsAPI) Search(ctx context.Context, request SearchListingsRequest) listing.Iterator[Listing] { - - getNextPage := func(ctx context.Context, req SearchListingsRequest) (*SearchListingsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerListingsImpl.Search(ctx, req) - } - getItems := func(resp *SearchListingsResponse) []Listing { - return resp.Listings - } - getNextReq := func(resp *SearchListingsResponse) *SearchListingsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Search listings. -// -// Search published listings in the Databricks Marketplace that the consumer has -// access to. This query supports a variety of different search parameters and -// performs fuzzy matching. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerListingsAPI) SearchAll(ctx context.Context, request SearchListingsRequest) ([]Listing, error) { - iterator := a.Search(ctx, request) - return listing.ToSlice[Listing](ctx, iterator) -} - type ConsumerPersonalizationRequestsInterface interface { // Create a personalization request. @@ -649,45 +401,6 @@ func (a *ConsumerPersonalizationRequestsAPI) GetByListingId(ctx context.Context, }) } -// List all personalization requests. 
-// -// List personalization requests for a consumer across all listings. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerPersonalizationRequestsAPI) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { - - getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerPersonalizationRequestsImpl.List(ctx, req) - } - getItems := func(resp *ListAllPersonalizationRequestsResponse) []PersonalizationRequest { - return resp.PersonalizationRequests - } - getNextReq := func(resp *ListAllPersonalizationRequestsResponse) *ListAllPersonalizationRequestsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List all personalization requests. -// -// List personalization requests for a consumer across all listings. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerPersonalizationRequestsAPI) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[PersonalizationRequest](ctx, iterator) -} - type ConsumerProvidersInterface interface { // Get one batch of providers. One may specify up to 50 IDs per request. @@ -766,47 +479,6 @@ func (a *ConsumerProvidersAPI) GetById(ctx context.Context, id string) (*GetProv }) } -// List providers. -// -// List all providers in the Databricks Marketplace with at least one visible -// listing. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ConsumerProvidersAPI) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { - - getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.consumerProvidersImpl.List(ctx, req) - } - getItems := func(resp *ListProvidersResponse) []ProviderInfo { - return resp.Providers - } - getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List providers. -// -// List all providers in the Databricks Marketplace with at least one visible -// listing. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ConsumerProvidersAPI) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ProviderInfo](ctx, iterator) -} - // ProviderInfoNameToIdMap calls [ConsumerProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. @@ -937,45 +609,6 @@ func (a *ProviderExchangeFiltersAPI) DeleteById(ctx context.Context, id string) }) } -// List exchange filters. -// -// # List exchange filter -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderExchangeFiltersAPI) List(ctx context.Context, request ListExchangeFiltersRequest) listing.Iterator[ExchangeFilter] { - - getNextPage := func(ctx context.Context, req ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerExchangeFiltersImpl.List(ctx, req) - } - getItems := func(resp *ListExchangeFiltersResponse) []ExchangeFilter { - return resp.Filters - } - getNextReq := func(resp *ListExchangeFiltersResponse) *ListExchangeFiltersRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List exchange filters. -// -// # List exchange filter -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangeFiltersAPI) ListAll(ctx context.Context, request ListExchangeFiltersRequest) ([]ExchangeFilter, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ExchangeFilter](ctx, iterator) -} - // ExchangeFilterNameToIdMap calls [ProviderExchangeFiltersAPI.ListAll] and creates a map of results with [ExchangeFilter].Name as key and [ExchangeFilter].Id as value. // // Returns an error if there's more than one [ExchangeFilter] with the same .Name. @@ -1214,45 +847,6 @@ func (a *ProviderExchangesAPI) GetById(ctx context.Context, id string) (*GetExch }) } -// List exchanges. -// -// # List exchanges visible to provider -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderExchangesAPI) List(ctx context.Context, request ListExchangesRequest) listing.Iterator[Exchange] { - - getNextPage := func(ctx context.Context, req ListExchangesRequest) (*ListExchangesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerExchangesImpl.List(ctx, req) - } - getItems := func(resp *ListExchangesResponse) []Exchange { - return resp.Exchanges - } - getNextReq := func(resp *ListExchangesResponse) *ListExchangesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List exchanges. -// -// # List exchanges visible to provider -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangesAPI) ListAll(ctx context.Context, request ListExchangesRequest) ([]Exchange, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[Exchange](ctx, iterator) -} - // ExchangeNameToIdMap calls [ProviderExchangesAPI.ListAll] and creates a map of results with [Exchange].Name as key and [Exchange].Id as value. // // Returns an error if there's more than one [Exchange] with the same .Name. @@ -1306,45 +900,6 @@ func (a *ProviderExchangesAPI) GetByName(ctx context.Context, name string) (*Exc return &alternatives[0], nil } -// List exchanges for listing. -// -// # List exchanges associated with a listing -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderExchangesAPI) ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) listing.Iterator[ExchangeListing] { - - getNextPage := func(ctx context.Context, req ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerExchangesImpl.ListExchangesForListing(ctx, req) - } - getItems := func(resp *ListExchangesForListingResponse) []ExchangeListing { - return resp.ExchangeListing - } - getNextReq := func(resp *ListExchangesForListingResponse) *ListExchangesForListingRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List exchanges for listing. -// -// # List exchanges associated with a listing -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangesAPI) ListExchangesForListingAll(ctx context.Context, request ListExchangesForListingRequest) ([]ExchangeListing, error) { - iterator := a.ListExchangesForListing(ctx, request) - return listing.ToSlice[ExchangeListing](ctx, iterator) -} - // ExchangeListingExchangeNameToExchangeIdMap calls [ProviderExchangesAPI.ListExchangesForListingAll] and creates a map of results with [ExchangeListing].ExchangeName as key and [ExchangeListing].ExchangeId as value. // // Returns an error if there's more than one [ExchangeListing] with the same .ExchangeName. @@ -1398,45 +953,6 @@ func (a *ProviderExchangesAPI) GetByExchangeName(ctx context.Context, name strin return &alternatives[0], nil } -// List listings for exchange. -// -// # List listings associated with an exchange -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderExchangesAPI) ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) listing.Iterator[ExchangeListing] { - - getNextPage := func(ctx context.Context, req ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerExchangesImpl.ListListingsForExchange(ctx, req) - } - getItems := func(resp *ListListingsForExchangeResponse) []ExchangeListing { - return resp.ExchangeListings - } - getNextReq := func(resp *ListListingsForExchangeResponse) *ListListingsForExchangeRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List listings for exchange. -// -// # List listings associated with an exchange -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderExchangesAPI) ListListingsForExchangeAll(ctx context.Context, request ListListingsForExchangeRequest) ([]ExchangeListing, error) { - iterator := a.ListListingsForExchange(ctx, request) - return listing.ToSlice[ExchangeListing](ctx, iterator) -} - // ExchangeListingListingNameToListingIdMap calls [ProviderExchangesAPI.ListListingsForExchangeAll] and creates a map of results with [ExchangeListing].ListingName as key and [ExchangeListing].ListingId as value. // // Returns an error if there's more than one [ExchangeListing] with the same .ListingName. @@ -1583,45 +1099,6 @@ func (a *ProviderFilesAPI) GetByFileId(ctx context.Context, fileId string) (*Get }) } -// List files. -// -// List files attached to a parent entity. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderFilesAPI) List(ctx context.Context, request ListFilesRequest) listing.Iterator[FileInfo] { - - getNextPage := func(ctx context.Context, req ListFilesRequest) (*ListFilesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerFilesImpl.List(ctx, req) - } - getItems := func(resp *ListFilesResponse) []FileInfo { - return resp.FileInfos - } - getNextReq := func(resp *ListFilesResponse) *ListFilesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List files. -// -// List files attached to a parent entity. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderFilesAPI) ListAll(ctx context.Context, request ListFilesRequest) ([]FileInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[FileInfo](ctx, iterator) -} - // FileInfoDisplayNameToIdMap calls [ProviderFilesAPI.ListAll] and creates a map of results with [FileInfo].DisplayName as key and [FileInfo].Id as value. // // Returns an error if there's more than one [FileInfo] with the same .DisplayName. @@ -1772,45 +1249,6 @@ func (a *ProviderListingsAPI) GetById(ctx context.Context, id string) (*GetListi }) } -// List listings. -// -// # List listings owned by this provider -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderListingsAPI) List(ctx context.Context, request GetListingsRequest) listing.Iterator[Listing] { - - getNextPage := func(ctx context.Context, req GetListingsRequest) (*GetListingsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerListingsImpl.List(ctx, req) - } - getItems := func(resp *GetListingsResponse) []Listing { - return resp.Listings - } - getNextReq := func(resp *GetListingsResponse) *GetListingsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List listings. -// -// # List listings owned by this provider -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderListingsAPI) ListAll(ctx context.Context, request GetListingsRequest) ([]Listing, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[Listing](ctx, iterator) -} - // ListingSummaryNameToIdMap calls [ProviderListingsAPI.ListAll] and creates a map of results with [Listing].Summary.Name as key and [Listing].Id as value. // // Returns an error if there's more than one [Listing] with the same .Summary.Name. @@ -1903,47 +1341,6 @@ type ProviderPersonalizationRequestsAPI struct { providerPersonalizationRequestsImpl } -// All personalization requests across all listings. -// -// List personalization requests to this provider. This will return all -// personalization requests, regardless of which listing they are for. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderPersonalizationRequestsAPI) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { - - getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerPersonalizationRequestsImpl.List(ctx, req) - } - getItems := func(resp *ListAllPersonalizationRequestsResponse) []PersonalizationRequest { - return resp.PersonalizationRequests - } - getNextReq := func(resp *ListAllPersonalizationRequestsResponse) *ListAllPersonalizationRequestsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// All personalization requests across all listings. -// -// List personalization requests to this provider. This will return all -// personalization requests, regardless of which listing they are for. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderPersonalizationRequestsAPI) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[PersonalizationRequest](ctx, iterator) -} - type ProviderProviderAnalyticsDashboardsInterface interface { // Create provider analytics dashboard. @@ -2077,45 +1474,6 @@ func (a *ProviderProvidersAPI) GetById(ctx context.Context, id string) (*GetProv }) } -// List providers. -// -// List provider profiles for account. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProviderProvidersAPI) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { - - getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providerProvidersImpl.List(ctx, req) - } - getItems := func(resp *ListProvidersResponse) []ProviderInfo { - return resp.Providers - } - getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List providers. -// -// List provider profiles for account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProviderProvidersAPI) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ProviderInfo](ctx, iterator) -} - // ProviderInfoNameToIdMap calls [ProviderProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].Id as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. 
diff --git a/service/marketplace/impl.go b/service/marketplace/impl.go index 278da15c6..0b54a882d 100755 --- a/service/marketplace/impl.go +++ b/service/marketplace/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just ConsumerFulfillments API methods @@ -15,7 +17,42 @@ type consumerFulfillmentsImpl struct { client *client.DatabricksClient } -func (a *consumerFulfillmentsImpl) Get(ctx context.Context, request GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { +// Get listing content metadata. +// +// Get a high level preview of the metadata of listing installable content. +func (a *consumerFulfillmentsImpl) Get(ctx context.Context, request GetListingContentMetadataRequest) listing.Iterator[SharedDataObject] { + + getNextPage := func(ctx context.Context, req GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalGet(ctx, req) + } + getItems := func(resp *GetListingContentMetadataResponse) []SharedDataObject { + return resp.SharedDataObjects + } + getNextReq := func(resp *GetListingContentMetadataResponse) *GetListingContentMetadataRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get listing content metadata. +// +// Get a high level preview of the metadata of listing installable content. 
+func (a *consumerFulfillmentsImpl) GetAll(ctx context.Context, request GetListingContentMetadataRequest) ([]SharedDataObject, error) { + iterator := a.Get(ctx, request) + return listing.ToSlice[SharedDataObject](ctx, iterator) +} + +func (a *consumerFulfillmentsImpl) internalGet(ctx context.Context, request GetListingContentMetadataRequest) (*GetListingContentMetadataResponse, error) { var getListingContentMetadataResponse GetListingContentMetadataResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/content", request.ListingId) queryParams := make(map[string]any) @@ -25,7 +62,50 @@ func (a *consumerFulfillmentsImpl) Get(ctx context.Context, request GetListingCo return &getListingContentMetadataResponse, err } -func (a *consumerFulfillmentsImpl) List(ctx context.Context, request ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { +// List all listing fulfillments. +// +// Get all listings fulfillments associated with a listing. A _fulfillment_ is a +// potential installation. Standard installations contain metadata about the +// attached share or git repo. Only one of these fields will be present. +// Personalized installations contain metadata about the attached share or git +// repo, as well as the Delta Sharing recipient type. 
+func (a *consumerFulfillmentsImpl) List(ctx context.Context, request ListFulfillmentsRequest) listing.Iterator[ListingFulfillment] { + + getNextPage := func(ctx context.Context, req ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFulfillmentsResponse) []ListingFulfillment { + return resp.Fulfillments + } + getNextReq := func(resp *ListFulfillmentsResponse) *ListFulfillmentsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all listing fulfillments. +// +// Get all listings fulfillments associated with a listing. A _fulfillment_ is a +// potential installation. Standard installations contain metadata about the +// attached share or git repo. Only one of these fields will be present. +// Personalized installations contain metadata about the attached share or git +// repo, as well as the Delta Sharing recipient type. 
+func (a *consumerFulfillmentsImpl) ListAll(ctx context.Context, request ListFulfillmentsRequest) ([]ListingFulfillment, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListingFulfillment](ctx, iterator) +} + +func (a *consumerFulfillmentsImpl) internalList(ctx context.Context, request ListFulfillmentsRequest) (*ListFulfillmentsResponse, error) { var listFulfillmentsResponse ListFulfillmentsResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/fulfillments", request.ListingId) queryParams := make(map[string]any) @@ -61,7 +141,42 @@ func (a *consumerInstallationsImpl) Delete(ctx context.Context, request DeleteIn return err } -func (a *consumerInstallationsImpl) List(ctx context.Context, request ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { +// List all installations. +// +// List all installations across all listings. +func (a *consumerInstallationsImpl) List(ctx context.Context, request ListAllInstallationsRequest) listing.Iterator[InstallationDetail] { + + getNextPage := func(ctx context.Context, req ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAllInstallationsResponse) []InstallationDetail { + return resp.Installations + } + getNextReq := func(resp *ListAllInstallationsResponse) *ListAllInstallationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all installations. +// +// List all installations across all listings. 
+func (a *consumerInstallationsImpl) ListAll(ctx context.Context, request ListAllInstallationsRequest) ([]InstallationDetail, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[InstallationDetail](ctx, iterator) +} + +func (a *consumerInstallationsImpl) internalList(ctx context.Context, request ListAllInstallationsRequest) (*ListAllInstallationsResponse, error) { var listAllInstallationsResponse ListAllInstallationsResponse path := "/api/2.1/marketplace-consumer/installations" queryParams := make(map[string]any) @@ -71,7 +186,42 @@ func (a *consumerInstallationsImpl) List(ctx context.Context, request ListAllIns return &listAllInstallationsResponse, err } -func (a *consumerInstallationsImpl) ListListingInstallations(ctx context.Context, request ListInstallationsRequest) (*ListInstallationsResponse, error) { +// List installations for a listing. +// +// List all installations for a particular listing. +func (a *consumerInstallationsImpl) ListListingInstallations(ctx context.Context, request ListInstallationsRequest) listing.Iterator[InstallationDetail] { + + getNextPage := func(ctx context.Context, req ListInstallationsRequest) (*ListInstallationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListListingInstallations(ctx, req) + } + getItems := func(resp *ListInstallationsResponse) []InstallationDetail { + return resp.Installations + } + getNextReq := func(resp *ListInstallationsResponse) *ListInstallationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List installations for a listing. +// +// List all installations for a particular listing. 
+func (a *consumerInstallationsImpl) ListListingInstallationsAll(ctx context.Context, request ListInstallationsRequest) ([]InstallationDetail, error) { + iterator := a.ListListingInstallations(ctx, request) + return listing.ToSlice[InstallationDetail](ctx, iterator) +} + +func (a *consumerInstallationsImpl) internalListListingInstallations(ctx context.Context, request ListInstallationsRequest) (*ListInstallationsResponse, error) { var listInstallationsResponse ListInstallationsResponse path := fmt.Sprintf("/api/2.1/marketplace-consumer/listings/%v/installations", request.ListingId) queryParams := make(map[string]any) @@ -117,7 +267,44 @@ func (a *consumerListingsImpl) Get(ctx context.Context, request GetListingReques return &getListingResponse, err } -func (a *consumerListingsImpl) List(ctx context.Context, request ListListingsRequest) (*ListListingsResponse, error) { +// List listings. +// +// List all published listings in the Databricks Marketplace that the consumer +// has access to. +func (a *consumerListingsImpl) List(ctx context.Context, request ListListingsRequest) listing.Iterator[Listing] { + + getNextPage := func(ctx context.Context, req ListListingsRequest) (*ListListingsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListListingsResponse) []Listing { + return resp.Listings + } + getNextReq := func(resp *ListListingsResponse) *ListListingsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List listings. +// +// List all published listings in the Databricks Marketplace that the consumer +// has access to. 
+func (a *consumerListingsImpl) ListAll(ctx context.Context, request ListListingsRequest) ([]Listing, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Listing](ctx, iterator) +} + +func (a *consumerListingsImpl) internalList(ctx context.Context, request ListListingsRequest) (*ListListingsResponse, error) { var listListingsResponse ListListingsResponse path := "/api/2.1/marketplace-consumer/listings" queryParams := make(map[string]any) @@ -127,7 +314,46 @@ func (a *consumerListingsImpl) List(ctx context.Context, request ListListingsReq return &listListingsResponse, err } -func (a *consumerListingsImpl) Search(ctx context.Context, request SearchListingsRequest) (*SearchListingsResponse, error) { +// Search listings. +// +// Search published listings in the Databricks Marketplace that the consumer has +// access to. This query supports a variety of different search parameters and +// performs fuzzy matching. +func (a *consumerListingsImpl) Search(ctx context.Context, request SearchListingsRequest) listing.Iterator[Listing] { + + getNextPage := func(ctx context.Context, req SearchListingsRequest) (*SearchListingsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearch(ctx, req) + } + getItems := func(resp *SearchListingsResponse) []Listing { + return resp.Listings + } + getNextReq := func(resp *SearchListingsResponse) *SearchListingsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search listings. +// +// Search published listings in the Databricks Marketplace that the consumer has +// access to. This query supports a variety of different search parameters and +// performs fuzzy matching. 
+func (a *consumerListingsImpl) SearchAll(ctx context.Context, request SearchListingsRequest) ([]Listing, error) { + iterator := a.Search(ctx, request) + return listing.ToSlice[Listing](ctx, iterator) +} + +func (a *consumerListingsImpl) internalSearch(ctx context.Context, request SearchListingsRequest) (*SearchListingsResponse, error) { var searchListingsResponse SearchListingsResponse path := "/api/2.1/marketplace-consumer/search-listings" queryParams := make(map[string]any) @@ -163,7 +389,42 @@ func (a *consumerPersonalizationRequestsImpl) Get(ctx context.Context, request G return &getPersonalizationRequestResponse, err } -func (a *consumerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { +// List all personalization requests. +// +// List personalization requests for a consumer across all listings. +func (a *consumerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { + + getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAllPersonalizationRequestsResponse) []PersonalizationRequest { + return resp.PersonalizationRequests + } + getNextReq := func(resp *ListAllPersonalizationRequestsResponse) *ListAllPersonalizationRequestsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all personalization requests. +// +// List personalization requests for a consumer across all listings. 
+func (a *consumerPersonalizationRequestsImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PersonalizationRequest](ctx, iterator) +} + +func (a *consumerPersonalizationRequestsImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse path := "/api/2.1/marketplace-consumer/personalization-requests" queryParams := make(map[string]any) @@ -198,7 +459,44 @@ func (a *consumerProvidersImpl) Get(ctx context.Context, request GetProviderRequ return &getProviderResponse, err } -func (a *consumerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { +// List providers. +// +// List all providers in the Databricks Marketplace with at least one visible +// listing. +func (a *consumerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { + + getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListProvidersResponse) []ProviderInfo { + return resp.Providers + } + getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List providers. +// +// List all providers in the Databricks Marketplace with at least one visible +// listing. 
+func (a *consumerProvidersImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ProviderInfo](ctx, iterator) +} + +func (a *consumerProvidersImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.1/marketplace-consumer/providers" queryParams := make(map[string]any) @@ -234,7 +532,42 @@ func (a *providerExchangeFiltersImpl) Delete(ctx context.Context, request Delete return err } -func (a *providerExchangeFiltersImpl) List(ctx context.Context, request ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { +// List exchange filters. +// +// List exchange filter +func (a *providerExchangeFiltersImpl) List(ctx context.Context, request ListExchangeFiltersRequest) listing.Iterator[ExchangeFilter] { + + getNextPage := func(ctx context.Context, req ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListExchangeFiltersResponse) []ExchangeFilter { + return resp.Filters + } + getNextReq := func(resp *ListExchangeFiltersResponse) *ListExchangeFiltersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List exchange filters. 
+// +// List exchange filter +func (a *providerExchangeFiltersImpl) ListAll(ctx context.Context, request ListExchangeFiltersRequest) ([]ExchangeFilter, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ExchangeFilter](ctx, iterator) +} + +func (a *providerExchangeFiltersImpl) internalList(ctx context.Context, request ListExchangeFiltersRequest) (*ListExchangeFiltersResponse, error) { var listExchangeFiltersResponse ListExchangeFiltersResponse path := "/api/2.0/marketplace-exchange/filters" queryParams := make(map[string]any) @@ -312,7 +645,42 @@ func (a *providerExchangesImpl) Get(ctx context.Context, request GetExchangeRequ return &getExchangeResponse, err } -func (a *providerExchangesImpl) List(ctx context.Context, request ListExchangesRequest) (*ListExchangesResponse, error) { +// List exchanges. +// +// List exchanges visible to provider +func (a *providerExchangesImpl) List(ctx context.Context, request ListExchangesRequest) listing.Iterator[Exchange] { + + getNextPage := func(ctx context.Context, req ListExchangesRequest) (*ListExchangesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListExchangesResponse) []Exchange { + return resp.Exchanges + } + getNextReq := func(resp *ListExchangesResponse) *ListExchangesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List exchanges. 
+// +// List exchanges visible to provider +func (a *providerExchangesImpl) ListAll(ctx context.Context, request ListExchangesRequest) ([]Exchange, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Exchange](ctx, iterator) +} + +func (a *providerExchangesImpl) internalList(ctx context.Context, request ListExchangesRequest) (*ListExchangesResponse, error) { var listExchangesResponse ListExchangesResponse path := "/api/2.0/marketplace-exchange/exchanges" queryParams := make(map[string]any) @@ -322,7 +690,42 @@ func (a *providerExchangesImpl) List(ctx context.Context, request ListExchangesR return &listExchangesResponse, err } -func (a *providerExchangesImpl) ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { +// List exchanges for listing. +// +// List exchanges associated with a listing +func (a *providerExchangesImpl) ListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) listing.Iterator[ExchangeListing] { + + getNextPage := func(ctx context.Context, req ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListExchangesForListing(ctx, req) + } + getItems := func(resp *ListExchangesForListingResponse) []ExchangeListing { + return resp.ExchangeListing + } + getNextReq := func(resp *ListExchangesForListingResponse) *ListExchangesForListingRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List exchanges for listing. 
+// +// List exchanges associated with a listing +func (a *providerExchangesImpl) ListExchangesForListingAll(ctx context.Context, request ListExchangesForListingRequest) ([]ExchangeListing, error) { + iterator := a.ListExchangesForListing(ctx, request) + return listing.ToSlice[ExchangeListing](ctx, iterator) +} + +func (a *providerExchangesImpl) internalListExchangesForListing(ctx context.Context, request ListExchangesForListingRequest) (*ListExchangesForListingResponse, error) { var listExchangesForListingResponse ListExchangesForListingResponse path := "/api/2.0/marketplace-exchange/exchanges-for-listing" queryParams := make(map[string]any) @@ -332,7 +735,42 @@ func (a *providerExchangesImpl) ListExchangesForListing(ctx context.Context, req return &listExchangesForListingResponse, err } -func (a *providerExchangesImpl) ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { +// List listings for exchange. +// +// List listings associated with an exchange +func (a *providerExchangesImpl) ListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) listing.Iterator[ExchangeListing] { + + getNextPage := func(ctx context.Context, req ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListListingsForExchange(ctx, req) + } + getItems := func(resp *ListListingsForExchangeResponse) []ExchangeListing { + return resp.ExchangeListings + } + getNextReq := func(resp *ListListingsForExchangeResponse) *ListListingsForExchangeRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List listings for exchange. 
+// +// List listings associated with an exchange +func (a *providerExchangesImpl) ListListingsForExchangeAll(ctx context.Context, request ListListingsForExchangeRequest) ([]ExchangeListing, error) { + iterator := a.ListListingsForExchange(ctx, request) + return listing.ToSlice[ExchangeListing](ctx, iterator) +} + +func (a *providerExchangesImpl) internalListListingsForExchange(ctx context.Context, request ListListingsForExchangeRequest) (*ListListingsForExchangeResponse, error) { var listListingsForExchangeResponse ListListingsForExchangeResponse path := "/api/2.0/marketplace-exchange/listings-for-exchange" queryParams := make(map[string]any) @@ -389,7 +827,42 @@ func (a *providerFilesImpl) Get(ctx context.Context, request GetFileRequest) (*G return &getFileResponse, err } -func (a *providerFilesImpl) List(ctx context.Context, request ListFilesRequest) (*ListFilesResponse, error) { +// List files. +// +// List files attached to a parent entity. +func (a *providerFilesImpl) List(ctx context.Context, request ListFilesRequest) listing.Iterator[FileInfo] { + + getNextPage := func(ctx context.Context, req ListFilesRequest) (*ListFilesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFilesResponse) []FileInfo { + return resp.FileInfos + } + getNextReq := func(resp *ListFilesResponse) *ListFilesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List files. +// +// List files attached to a parent entity. 
+func (a *providerFilesImpl) ListAll(ctx context.Context, request ListFilesRequest) ([]FileInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FileInfo](ctx, iterator) +} + +func (a *providerFilesImpl) internalList(ctx context.Context, request ListFilesRequest) (*ListFilesResponse, error) { var listFilesResponse ListFilesResponse path := "/api/2.0/marketplace-provider/files" queryParams := make(map[string]any) @@ -435,7 +908,42 @@ func (a *providerListingsImpl) Get(ctx context.Context, request GetListingReques return &getListingResponse, err } -func (a *providerListingsImpl) List(ctx context.Context, request GetListingsRequest) (*GetListingsResponse, error) { +// List listings. +// +// List listings owned by this provider +func (a *providerListingsImpl) List(ctx context.Context, request GetListingsRequest) listing.Iterator[Listing] { + + getNextPage := func(ctx context.Context, req GetListingsRequest) (*GetListingsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetListingsResponse) []Listing { + return resp.Listings + } + getNextReq := func(resp *GetListingsResponse) *GetListingsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List listings. 
+// +// List listings owned by this provider +func (a *providerListingsImpl) ListAll(ctx context.Context, request GetListingsRequest) ([]Listing, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[Listing](ctx, iterator) +} + +func (a *providerListingsImpl) internalList(ctx context.Context, request GetListingsRequest) (*GetListingsResponse, error) { var getListingsResponse GetListingsResponse path := "/api/2.0/marketplace-provider/listings" queryParams := make(map[string]any) @@ -461,7 +969,44 @@ type providerPersonalizationRequestsImpl struct { client *client.DatabricksClient } -func (a *providerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { +// All personalization requests across all listings. +// +// List personalization requests to this provider. This will return all +// personalization requests, regardless of which listing they are for. +func (a *providerPersonalizationRequestsImpl) List(ctx context.Context, request ListAllPersonalizationRequestsRequest) listing.Iterator[PersonalizationRequest] { + + getNextPage := func(ctx context.Context, req ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAllPersonalizationRequestsResponse) []PersonalizationRequest { + return resp.PersonalizationRequests + } + getNextReq := func(resp *ListAllPersonalizationRequestsResponse) *ListAllPersonalizationRequestsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// All personalization requests across all listings. +// +// List personalization requests to this provider. 
This will return all +// personalization requests, regardless of which listing they are for. +func (a *providerPersonalizationRequestsImpl) ListAll(ctx context.Context, request ListAllPersonalizationRequestsRequest) ([]PersonalizationRequest, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PersonalizationRequest](ctx, iterator) +} + +func (a *providerPersonalizationRequestsImpl) internalList(ctx context.Context, request ListAllPersonalizationRequestsRequest) (*ListAllPersonalizationRequestsResponse, error) { var listAllPersonalizationRequestsResponse ListAllPersonalizationRequestsResponse path := "/api/2.0/marketplace-provider/personalization-requests" queryParams := make(map[string]any) @@ -564,7 +1109,42 @@ func (a *providerProvidersImpl) Get(ctx context.Context, request GetProviderRequ return &getProviderResponse, err } -func (a *providerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { +// List providers. +// +// List provider profiles for account. +func (a *providerProvidersImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { + + getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListProvidersResponse) []ProviderInfo { + return resp.Providers + } + getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List providers. +// +// List provider profiles for account. 
+func (a *providerProvidersImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ProviderInfo](ctx, iterator) +} + +func (a *providerProvidersImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.0/marketplace-provider/providers" queryParams := make(map[string]any) diff --git a/service/ml/api.go b/service/ml/api.go index c3c468759..47ca2f98d 100755 --- a/service/ml/api.go +++ b/service/ml/api.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" - "github.com/databricks/databricks-sdk-go/useragent" ) type ExperimentsInterface interface { @@ -341,46 +340,6 @@ type ExperimentsAPI struct { experimentsImpl } -// Get history of a given metric within a run. -// -// Gets a list of all values for the specified metric for a given run. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] { - - getNextPage := func(ctx context.Context, req GetHistoryRequest) (*GetMetricHistoryResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.experimentsImpl.GetHistory(ctx, req) - } - getItems := func(resp *GetMetricHistoryResponse) []Metric { - return resp.Metrics - } - getNextReq := func(resp *GetMetricHistoryResponse) *GetHistoryRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get history of a given metric within a run. -// -// Gets a list of all values for the specified metric for a given run. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ExperimentsAPI) GetHistoryAll(ctx context.Context, request GetHistoryRequest) ([]Metric, error) { - iterator := a.GetHistory(ctx, request) - return listing.ToSliceN[Metric, int](ctx, iterator, request.MaxResults) - -} - // Get experiment permission levels. // // Gets the permission levels that a user can have on an object. @@ -400,179 +359,6 @@ func (a *ExperimentsAPI) GetPermissionsByExperimentId(ctx context.Context, exper }) } -// Get all artifacts. -// -// List artifacts for a run. Takes an optional `artifact_path` prefix. If it is -// specified, the response contains only artifacts with the specified prefix. -// This API does not support pagination when listing artifacts in UC Volumes. A -// maximum of 1000 artifacts will be retrieved for UC Volumes. Please call -// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC -// Volumes, which supports pagination. See [List directory contents | Files -// API](/api/workspace/files/listdirectorycontents). -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] { - - getNextPage := func(ctx context.Context, req ListArtifactsRequest) (*ListArtifactsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.experimentsImpl.ListArtifacts(ctx, req) - } - getItems := func(resp *ListArtifactsResponse) []FileInfo { - return resp.Files - } - getNextReq := func(resp *ListArtifactsResponse) *ListArtifactsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get all artifacts. -// -// List artifacts for a run. Takes an optional `artifact_path` prefix. If it is -// specified, the response contains only artifacts with the specified prefix. 
-// This API does not support pagination when listing artifacts in UC Volumes. A -// maximum of 1000 artifacts will be retrieved for UC Volumes. Please call -// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC -// Volumes, which supports pagination. See [List directory contents | Files -// API](/api/workspace/files/listdirectorycontents). -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) { - iterator := a.ListArtifacts(ctx, request) - return listing.ToSlice[FileInfo](ctx, iterator) -} - -// List experiments. -// -// Gets a list of all experiments. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) ListExperiments(ctx context.Context, request ListExperimentsRequest) listing.Iterator[Experiment] { - - getNextPage := func(ctx context.Context, req ListExperimentsRequest) (*ListExperimentsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.experimentsImpl.ListExperiments(ctx, req) - } - getItems := func(resp *ListExperimentsResponse) []Experiment { - return resp.Experiments - } - getNextReq := func(resp *ListExperimentsResponse) *ListExperimentsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List experiments. -// -// Gets a list of all experiments. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) { - iterator := a.ListExperiments(ctx, request) - return listing.ToSliceN[Experiment, int](ctx, iterator, request.MaxResults) - -} - -// Search experiments. 
-// -// Searches for experiments that satisfy specified search criteria. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) SearchExperiments(ctx context.Context, request SearchExperiments) listing.Iterator[Experiment] { - - getNextPage := func(ctx context.Context, req SearchExperiments) (*SearchExperimentsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.experimentsImpl.SearchExperiments(ctx, req) - } - getItems := func(resp *SearchExperimentsResponse) []Experiment { - return resp.Experiments - } - getNextReq := func(resp *SearchExperimentsResponse) *SearchExperiments { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Search experiments. -// -// Searches for experiments that satisfy specified search criteria. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) SearchExperimentsAll(ctx context.Context, request SearchExperiments) ([]Experiment, error) { - iterator := a.SearchExperiments(ctx, request) - return listing.ToSlice[Experiment](ctx, iterator) -} - -// Search for runs. -// -// Searches for runs that satisfy expressions. -// -// Search expressions can use `mlflowMetric` and `mlflowParam` keys.", -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ExperimentsAPI) SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] { - - getNextPage := func(ctx context.Context, req SearchRuns) (*SearchRunsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.experimentsImpl.SearchRuns(ctx, req) - } - getItems := func(resp *SearchRunsResponse) []Run { - return resp.Runs - } - getNextReq := func(resp *SearchRunsResponse) *SearchRuns { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Search for runs. -// -// Searches for runs that satisfy expressions. -// -// Search expressions can use `mlflowMetric` and `mlflowParam` keys.", -// -// This method is generated by Databricks SDK Code Generator. -func (a *ExperimentsAPI) SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) { - iterator := a.SearchRuns(ctx, request) - return listing.ToSlice[Run](ctx, iterator) -} - type ModelRegistryInterface interface { // Approve transition request. @@ -872,39 +658,6 @@ type ModelRegistryAPI struct { modelRegistryImpl } -// Get the latest version. -// -// Gets the latest version of a registered model. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) listing.Iterator[ModelVersion] { - - getNextPage := func(ctx context.Context, req GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.modelRegistryImpl.GetLatestVersions(ctx, req) - } - getItems := func(resp *GetLatestVersionsResponse) []ModelVersion { - return resp.ModelVersions - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get the latest version. 
-// -// Gets the latest version of a registered model. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) GetLatestVersionsAll(ctx context.Context, request GetLatestVersionsRequest) ([]ModelVersion, error) { - iterator := a.GetLatestVersions(ctx, request) - return listing.ToSlice[ModelVersion](ctx, iterator) -} - // Get registered model permission levels. // // Gets the permission levels that a user can have on an object. @@ -923,201 +676,3 @@ func (a *ModelRegistryAPI) GetPermissionsByRegisteredModelId(ctx context.Context RegisteredModelId: registeredModelId, }) } - -// List models. -// -// Lists all available registered models, up to the limit specified in -// __max_results__. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) ListModels(ctx context.Context, request ListModelsRequest) listing.Iterator[Model] { - - getNextPage := func(ctx context.Context, req ListModelsRequest) (*ListModelsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.modelRegistryImpl.ListModels(ctx, req) - } - getItems := func(resp *ListModelsResponse) []Model { - return resp.RegisteredModels - } - getNextReq := func(resp *ListModelsResponse) *ListModelsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List models. -// -// Lists all available registered models, up to the limit specified in -// __max_results__. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) ListModelsAll(ctx context.Context, request ListModelsRequest) ([]Model, error) { - iterator := a.ListModels(ctx, request) - return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) - -} - -// List transition requests. 
-// -// Gets a list of all open stage transition requests for the model version. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) listing.Iterator[Activity] { - - getNextPage := func(ctx context.Context, req ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.modelRegistryImpl.ListTransitionRequests(ctx, req) - } - getItems := func(resp *ListTransitionRequestsResponse) []Activity { - return resp.Requests - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List transition requests. -// -// Gets a list of all open stage transition requests for the model version. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) ListTransitionRequestsAll(ctx context.Context, request ListTransitionRequestsRequest) ([]Activity, error) { - iterator := a.ListTransitionRequests(ctx, request) - return listing.ToSlice[Activity](ctx, iterator) -} - -// List registry webhooks. -// -// **NOTE:** This endpoint is in Public Preview. -// -// Lists all registry webhooks. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ModelRegistryAPI) ListWebhooks(ctx context.Context, request ListWebhooksRequest) listing.Iterator[RegistryWebhook] { - - getNextPage := func(ctx context.Context, req ListWebhooksRequest) (*ListRegistryWebhooks, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.modelRegistryImpl.ListWebhooks(ctx, req) - } - getItems := func(resp *ListRegistryWebhooks) []RegistryWebhook { - return resp.Webhooks - } - getNextReq := func(resp *ListRegistryWebhooks) *ListWebhooksRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List registry webhooks. -// -// **NOTE:** This endpoint is in Public Preview. -// -// Lists all registry webhooks. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) ListWebhooksAll(ctx context.Context, request ListWebhooksRequest) ([]RegistryWebhook, error) { - iterator := a.ListWebhooks(ctx, request) - return listing.ToSlice[RegistryWebhook](ctx, iterator) -} - -// Searches model versions. -// -// Searches for specific model versions based on the supplied __filter__. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ModelRegistryAPI) SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) listing.Iterator[ModelVersion] { - - getNextPage := func(ctx context.Context, req SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.modelRegistryImpl.SearchModelVersions(ctx, req) - } - getItems := func(resp *SearchModelVersionsResponse) []ModelVersion { - return resp.ModelVersions - } - getNextReq := func(resp *SearchModelVersionsResponse) *SearchModelVersionsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Searches model versions. -// -// Searches for specific model versions based on the supplied __filter__. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) SearchModelVersionsAll(ctx context.Context, request SearchModelVersionsRequest) ([]ModelVersion, error) { - iterator := a.SearchModelVersions(ctx, request) - return listing.ToSliceN[ModelVersion, int](ctx, iterator, request.MaxResults) - -} - -// Search models. -// -// Search for registered models based on the specified __filter__. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ModelRegistryAPI) SearchModels(ctx context.Context, request SearchModelsRequest) listing.Iterator[Model] { - - getNextPage := func(ctx context.Context, req SearchModelsRequest) (*SearchModelsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.modelRegistryImpl.SearchModels(ctx, req) - } - getItems := func(resp *SearchModelsResponse) []Model { - return resp.RegisteredModels - } - getNextReq := func(resp *SearchModelsResponse) *SearchModelsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Search models. -// -// Search for registered models based on the specified __filter__. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ModelRegistryAPI) SearchModelsAll(ctx context.Context, request SearchModelsRequest) ([]Model, error) { - iterator := a.SearchModels(ctx, request) - return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) - -} diff --git a/service/ml/impl.go b/service/ml/impl.go index 3141be45a..fcf826960 100755 --- a/service/ml/impl.go +++ b/service/ml/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just Experiments API methods @@ -101,7 +103,43 @@ func (a *experimentsImpl) GetExperiment(ctx context.Context, request GetExperime return &getExperimentResponse, err } -func (a *experimentsImpl) GetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error) { +// Get history of a given metric within a run. +// +// Gets a list of all values for the specified metric for a given run. 
+func (a *experimentsImpl) GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] { + + getNextPage := func(ctx context.Context, req GetHistoryRequest) (*GetMetricHistoryResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalGetHistory(ctx, req) + } + getItems := func(resp *GetMetricHistoryResponse) []Metric { + return resp.Metrics + } + getNextReq := func(resp *GetMetricHistoryResponse) *GetHistoryRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get history of a given metric within a run. +// +// Gets a list of all values for the specified metric for a given run. +func (a *experimentsImpl) GetHistoryAll(ctx context.Context, request GetHistoryRequest) ([]Metric, error) { + iterator := a.GetHistory(ctx, request) + return listing.ToSliceN[Metric, int](ctx, iterator, request.MaxResults) + +} + +func (a *experimentsImpl) internalGetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error) { var getMetricHistoryResponse GetMetricHistoryResponse path := "/api/2.0/mlflow/metrics/get-history" queryParams := make(map[string]any) @@ -141,7 +179,54 @@ func (a *experimentsImpl) GetRun(ctx context.Context, request GetRunRequest) (*G return &getRunResponse, err } -func (a *experimentsImpl) ListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error) { +// Get all artifacts. +// +// List artifacts for a run. Takes an optional `artifact_path` prefix. If it is +// specified, the response contains only artifacts with the specified prefix. +// This API does not support pagination when listing artifacts in UC Volumes. A +// maximum of 1000 artifacts will be retrieved for UC Volumes. 
Please call +// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC +// Volumes, which supports pagination. See [List directory contents | Files +// API](/api/workspace/files/listdirectorycontents). +func (a *experimentsImpl) ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] { + + getNextPage := func(ctx context.Context, req ListArtifactsRequest) (*ListArtifactsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListArtifacts(ctx, req) + } + getItems := func(resp *ListArtifactsResponse) []FileInfo { + return resp.Files + } + getNextReq := func(resp *ListArtifactsResponse) *ListArtifactsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get all artifacts. +// +// List artifacts for a run. Takes an optional `artifact_path` prefix. If it is +// specified, the response contains only artifacts with the specified prefix. +// This API does not support pagination when listing artifacts in UC Volumes. A +// maximum of 1000 artifacts will be retrieved for UC Volumes. Please call +// `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC +// Volumes, which supports pagination. See [List directory contents | Files +// API](/api/workspace/files/listdirectorycontents). 
+func (a *experimentsImpl) ListArtifactsAll(ctx context.Context, request ListArtifactsRequest) ([]FileInfo, error) { + iterator := a.ListArtifacts(ctx, request) + return listing.ToSlice[FileInfo](ctx, iterator) +} + +func (a *experimentsImpl) internalListArtifacts(ctx context.Context, request ListArtifactsRequest) (*ListArtifactsResponse, error) { var listArtifactsResponse ListArtifactsResponse path := "/api/2.0/mlflow/artifacts/list" queryParams := make(map[string]any) @@ -151,7 +236,43 @@ func (a *experimentsImpl) ListArtifacts(ctx context.Context, request ListArtifac return &listArtifactsResponse, err } -func (a *experimentsImpl) ListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) { +// List experiments. +// +// Gets a list of all experiments. +func (a *experimentsImpl) ListExperiments(ctx context.Context, request ListExperimentsRequest) listing.Iterator[Experiment] { + + getNextPage := func(ctx context.Context, req ListExperimentsRequest) (*ListExperimentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListExperiments(ctx, req) + } + getItems := func(resp *ListExperimentsResponse) []Experiment { + return resp.Experiments + } + getNextReq := func(resp *ListExperimentsResponse) *ListExperimentsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List experiments. +// +// Gets a list of all experiments. 
+func (a *experimentsImpl) ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) { + iterator := a.ListExperiments(ctx, request) + return listing.ToSliceN[Experiment, int](ctx, iterator, request.MaxResults) + +} + +func (a *experimentsImpl) internalListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) { var listExperimentsResponse ListExperimentsResponse path := "/api/2.0/mlflow/experiments/list" queryParams := make(map[string]any) @@ -249,7 +370,42 @@ func (a *experimentsImpl) RestoreRuns(ctx context.Context, request RestoreRuns) return &restoreRunsResponse, err } -func (a *experimentsImpl) SearchExperiments(ctx context.Context, request SearchExperiments) (*SearchExperimentsResponse, error) { +// Search experiments. +// +// Searches for experiments that satisfy specified search criteria. +func (a *experimentsImpl) SearchExperiments(ctx context.Context, request SearchExperiments) listing.Iterator[Experiment] { + + getNextPage := func(ctx context.Context, req SearchExperiments) (*SearchExperimentsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchExperiments(ctx, req) + } + getItems := func(resp *SearchExperimentsResponse) []Experiment { + return resp.Experiments + } + getNextReq := func(resp *SearchExperimentsResponse) *SearchExperiments { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search experiments. +// +// Searches for experiments that satisfy specified search criteria. 
+func (a *experimentsImpl) SearchExperimentsAll(ctx context.Context, request SearchExperiments) ([]Experiment, error) { + iterator := a.SearchExperiments(ctx, request) + return listing.ToSlice[Experiment](ctx, iterator) +} + +func (a *experimentsImpl) internalSearchExperiments(ctx context.Context, request SearchExperiments) (*SearchExperimentsResponse, error) { var searchExperimentsResponse SearchExperimentsResponse path := "/api/2.0/mlflow/experiments/search" queryParams := make(map[string]any) @@ -260,7 +416,46 @@ func (a *experimentsImpl) SearchExperiments(ctx context.Context, request SearchE return &searchExperimentsResponse, err } -func (a *experimentsImpl) SearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error) { +// Search for runs. +// +// Searches for runs that satisfy expressions. +// +// Search expressions can use `mlflowMetric` and `mlflowParam` keys.", +func (a *experimentsImpl) SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] { + + getNextPage := func(ctx context.Context, req SearchRuns) (*SearchRunsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchRuns(ctx, req) + } + getItems := func(resp *SearchRunsResponse) []Run { + return resp.Runs + } + getNextReq := func(resp *SearchRunsResponse) *SearchRuns { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search for runs. +// +// Searches for runs that satisfy expressions. 
+// +// Search expressions can use `mlflowMetric` and `mlflowParam` keys.", +func (a *experimentsImpl) SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) { + iterator := a.SearchRuns(ctx, request) + return listing.ToSlice[Run](ctx, iterator) +} + +func (a *experimentsImpl) internalSearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error) { var searchRunsResponse SearchRunsResponse path := "/api/2.0/mlflow/runs/search" queryParams := make(map[string]any) @@ -478,7 +673,36 @@ func (a *modelRegistryImpl) DeleteWebhook(ctx context.Context, request DeleteWeb return err } -func (a *modelRegistryImpl) GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { +// Get the latest version. +// +// Gets the latest version of a registered model. +func (a *modelRegistryImpl) GetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) listing.Iterator[ModelVersion] { + + getNextPage := func(ctx context.Context, req GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalGetLatestVersions(ctx, req) + } + getItems := func(resp *GetLatestVersionsResponse) []ModelVersion { + return resp.ModelVersions + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get the latest version. +// +// Gets the latest version of a registered model. 
+func (a *modelRegistryImpl) GetLatestVersionsAll(ctx context.Context, request GetLatestVersionsRequest) ([]ModelVersion, error) { + iterator := a.GetLatestVersions(ctx, request) + return listing.ToSlice[ModelVersion](ctx, iterator) +} + +func (a *modelRegistryImpl) internalGetLatestVersions(ctx context.Context, request GetLatestVersionsRequest) (*GetLatestVersionsResponse, error) { var getLatestVersionsResponse GetLatestVersionsResponse path := "/api/2.0/mlflow/registered-models/get-latest-versions" queryParams := make(map[string]any) @@ -539,7 +763,45 @@ func (a *modelRegistryImpl) GetPermissions(ctx context.Context, request GetRegis return ®isteredModelPermissions, err } -func (a *modelRegistryImpl) ListModels(ctx context.Context, request ListModelsRequest) (*ListModelsResponse, error) { +// List models. +// +// Lists all available registered models, up to the limit specified in +// __max_results__. +func (a *modelRegistryImpl) ListModels(ctx context.Context, request ListModelsRequest) listing.Iterator[Model] { + + getNextPage := func(ctx context.Context, req ListModelsRequest) (*ListModelsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListModels(ctx, req) + } + getItems := func(resp *ListModelsResponse) []Model { + return resp.RegisteredModels + } + getNextReq := func(resp *ListModelsResponse) *ListModelsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List models. +// +// Lists all available registered models, up to the limit specified in +// __max_results__. 
+func (a *modelRegistryImpl) ListModelsAll(ctx context.Context, request ListModelsRequest) ([]Model, error) { + iterator := a.ListModels(ctx, request) + return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) + +} + +func (a *modelRegistryImpl) internalListModels(ctx context.Context, request ListModelsRequest) (*ListModelsResponse, error) { var listModelsResponse ListModelsResponse path := "/api/2.0/mlflow/registered-models/list" queryParams := make(map[string]any) @@ -549,7 +811,36 @@ func (a *modelRegistryImpl) ListModels(ctx context.Context, request ListModelsRe return &listModelsResponse, err } -func (a *modelRegistryImpl) ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { +// List transition requests. +// +// Gets a list of all open stage transition requests for the model version. +func (a *modelRegistryImpl) ListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) listing.Iterator[Activity] { + + getNextPage := func(ctx context.Context, req ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListTransitionRequests(ctx, req) + } + getItems := func(resp *ListTransitionRequestsResponse) []Activity { + return resp.Requests + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List transition requests. +// +// Gets a list of all open stage transition requests for the model version. 
+func (a *modelRegistryImpl) ListTransitionRequestsAll(ctx context.Context, request ListTransitionRequestsRequest) ([]Activity, error) { + iterator := a.ListTransitionRequests(ctx, request) + return listing.ToSlice[Activity](ctx, iterator) +} + +func (a *modelRegistryImpl) internalListTransitionRequests(ctx context.Context, request ListTransitionRequestsRequest) (*ListTransitionRequestsResponse, error) { var listTransitionRequestsResponse ListTransitionRequestsResponse path := "/api/2.0/mlflow/transition-requests/list" queryParams := make(map[string]any) @@ -559,7 +850,46 @@ func (a *modelRegistryImpl) ListTransitionRequests(ctx context.Context, request return &listTransitionRequestsResponse, err } -func (a *modelRegistryImpl) ListWebhooks(ctx context.Context, request ListWebhooksRequest) (*ListRegistryWebhooks, error) { +// List registry webhooks. +// +// **NOTE:** This endpoint is in Public Preview. +// +// Lists all registry webhooks. +func (a *modelRegistryImpl) ListWebhooks(ctx context.Context, request ListWebhooksRequest) listing.Iterator[RegistryWebhook] { + + getNextPage := func(ctx context.Context, req ListWebhooksRequest) (*ListRegistryWebhooks, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListWebhooks(ctx, req) + } + getItems := func(resp *ListRegistryWebhooks) []RegistryWebhook { + return resp.Webhooks + } + getNextReq := func(resp *ListRegistryWebhooks) *ListWebhooksRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List registry webhooks. +// +// **NOTE:** This endpoint is in Public Preview. +// +// Lists all registry webhooks. 
+func (a *modelRegistryImpl) ListWebhooksAll(ctx context.Context, request ListWebhooksRequest) ([]RegistryWebhook, error) { + iterator := a.ListWebhooks(ctx, request) + return listing.ToSlice[RegistryWebhook](ctx, iterator) +} + +func (a *modelRegistryImpl) internalListWebhooks(ctx context.Context, request ListWebhooksRequest) (*ListRegistryWebhooks, error) { var listRegistryWebhooks ListRegistryWebhooks path := "/api/2.0/mlflow/registry-webhooks/list" queryParams := make(map[string]any) @@ -591,7 +921,43 @@ func (a *modelRegistryImpl) RenameModel(ctx context.Context, request RenameModel return &renameModelResponse, err } -func (a *modelRegistryImpl) SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { +// Searches model versions. +// +// Searches for specific model versions based on the supplied __filter__. +func (a *modelRegistryImpl) SearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) listing.Iterator[ModelVersion] { + + getNextPage := func(ctx context.Context, req SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchModelVersions(ctx, req) + } + getItems := func(resp *SearchModelVersionsResponse) []ModelVersion { + return resp.ModelVersions + } + getNextReq := func(resp *SearchModelVersionsResponse) *SearchModelVersionsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Searches model versions. +// +// Searches for specific model versions based on the supplied __filter__. 
+func (a *modelRegistryImpl) SearchModelVersionsAll(ctx context.Context, request SearchModelVersionsRequest) ([]ModelVersion, error) { + iterator := a.SearchModelVersions(ctx, request) + return listing.ToSliceN[ModelVersion, int](ctx, iterator, request.MaxResults) + +} + +func (a *modelRegistryImpl) internalSearchModelVersions(ctx context.Context, request SearchModelVersionsRequest) (*SearchModelVersionsResponse, error) { var searchModelVersionsResponse SearchModelVersionsResponse path := "/api/2.0/mlflow/model-versions/search" queryParams := make(map[string]any) @@ -601,7 +967,43 @@ func (a *modelRegistryImpl) SearchModelVersions(ctx context.Context, request Sea return &searchModelVersionsResponse, err } -func (a *modelRegistryImpl) SearchModels(ctx context.Context, request SearchModelsRequest) (*SearchModelsResponse, error) { +// Search models. +// +// Search for registered models based on the specified __filter__. +func (a *modelRegistryImpl) SearchModels(ctx context.Context, request SearchModelsRequest) listing.Iterator[Model] { + + getNextPage := func(ctx context.Context, req SearchModelsRequest) (*SearchModelsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalSearchModels(ctx, req) + } + getItems := func(resp *SearchModelsResponse) []Model { + return resp.RegisteredModels + } + getNextReq := func(resp *SearchModelsResponse) *SearchModelsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Search models. +// +// Search for registered models based on the specified __filter__. 
+func (a *modelRegistryImpl) SearchModelsAll(ctx context.Context, request SearchModelsRequest) ([]Model, error) { + iterator := a.SearchModels(ctx, request) + return listing.ToSliceN[Model, int](ctx, iterator, request.MaxResults) + +} + +func (a *modelRegistryImpl) internalSearchModels(ctx context.Context, request SearchModelsRequest) (*SearchModelsResponse, error) { var searchModelsResponse SearchModelsResponse path := "/api/2.0/mlflow/registered-models/search" queryParams := make(map[string]any) diff --git a/service/oauth2/api.go b/service/oauth2/api.go index 5119197e3..e8fc20058 100755 --- a/service/oauth2/api.go +++ b/service/oauth2/api.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" - "github.com/databricks/databricks-sdk-go/useragent" ) type AccountFederationPolicyInterface interface { @@ -118,41 +117,6 @@ func (a *AccountFederationPolicyAPI) GetByPolicyId(ctx context.Context, policyId }) } -// List account federation policies. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountFederationPolicyAPI) List(ctx context.Context, request ListAccountFederationPoliciesRequest) listing.Iterator[FederationPolicy] { - - getNextPage := func(ctx context.Context, req ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountFederationPolicyImpl.List(ctx, req) - } - getItems := func(resp *ListFederationPoliciesResponse) []FederationPolicy { - return resp.Policies - } - getNextReq := func(resp *ListFederationPoliciesResponse) *ListAccountFederationPoliciesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List account federation policies. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountFederationPolicyAPI) ListAll(ctx context.Context, request ListAccountFederationPoliciesRequest) ([]FederationPolicy, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[FederationPolicy](ctx, iterator) -} - type CustomAppIntegrationInterface interface { // Create Custom OAuth App Integration. @@ -242,47 +206,6 @@ func (a *CustomAppIntegrationAPI) GetByIntegrationId(ctx context.Context, integr }) } -// Get custom oauth app integrations. -// -// Get the list of custom OAuth app integrations for the specified Databricks -// account -// -// This method is generated by Databricks SDK Code Generator. -func (a *CustomAppIntegrationAPI) List(ctx context.Context, request ListCustomAppIntegrationsRequest) listing.Iterator[GetCustomAppIntegrationOutput] { - - getNextPage := func(ctx context.Context, req ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.customAppIntegrationImpl.List(ctx, req) - } - getItems := func(resp *GetCustomAppIntegrationsOutput) []GetCustomAppIntegrationOutput { - return resp.Apps - } - getNextReq := func(resp *GetCustomAppIntegrationsOutput) *ListCustomAppIntegrationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get custom oauth app integrations. -// -// Get the list of custom OAuth app integrations for the specified Databricks -// account -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *CustomAppIntegrationAPI) ListAll(ctx context.Context, request ListCustomAppIntegrationsRequest) ([]GetCustomAppIntegrationOutput, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[GetCustomAppIntegrationOutput](ctx, iterator) -} - type OAuthPublishedAppsInterface interface { // Get all the published OAuth apps. @@ -316,45 +239,6 @@ type OAuthPublishedAppsAPI struct { oAuthPublishedAppsImpl } -// Get all the published OAuth apps. -// -// Get all the available published OAuth apps in Databricks. -// -// This method is generated by Databricks SDK Code Generator. -func (a *OAuthPublishedAppsAPI) List(ctx context.Context, request ListOAuthPublishedAppsRequest) listing.Iterator[PublishedAppOutput] { - - getNextPage := func(ctx context.Context, req ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.oAuthPublishedAppsImpl.List(ctx, req) - } - getItems := func(resp *GetPublishedAppsOutput) []PublishedAppOutput { - return resp.Apps - } - getNextReq := func(resp *GetPublishedAppsOutput) *ListOAuthPublishedAppsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get all the published OAuth apps. -// -// Get all the available published OAuth apps in Databricks. -// -// This method is generated by Databricks SDK Code Generator. -func (a *OAuthPublishedAppsAPI) ListAll(ctx context.Context, request ListOAuthPublishedAppsRequest) ([]PublishedAppOutput, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[PublishedAppOutput](ctx, iterator) -} - type PublishedAppIntegrationInterface interface { // Create Published OAuth App Integration. 
@@ -444,47 +328,6 @@ func (a *PublishedAppIntegrationAPI) GetByIntegrationId(ctx context.Context, int }) } -// Get published oauth app integrations. -// -// Get the list of published OAuth app integrations for the specified Databricks -// account -// -// This method is generated by Databricks SDK Code Generator. -func (a *PublishedAppIntegrationAPI) List(ctx context.Context, request ListPublishedAppIntegrationsRequest) listing.Iterator[GetPublishedAppIntegrationOutput] { - - getNextPage := func(ctx context.Context, req ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.publishedAppIntegrationImpl.List(ctx, req) - } - getItems := func(resp *GetPublishedAppIntegrationsOutput) []GetPublishedAppIntegrationOutput { - return resp.Apps - } - getNextReq := func(resp *GetPublishedAppIntegrationsOutput) *ListPublishedAppIntegrationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get published oauth app integrations. -// -// Get the list of published OAuth app integrations for the specified Databricks -// account -// -// This method is generated by Databricks SDK Code Generator. -func (a *PublishedAppIntegrationAPI) ListAll(ctx context.Context, request ListPublishedAppIntegrationsRequest) ([]GetPublishedAppIntegrationOutput, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[GetPublishedAppIntegrationOutput](ctx, iterator) -} - type ServicePrincipalFederationPolicyInterface interface { // Create service principal federation policy. @@ -601,44 +444,9 @@ func (a *ServicePrincipalFederationPolicyAPI) GetByServicePrincipalIdAndPolicyId }) } -// List service principal federation policies. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ServicePrincipalFederationPolicyAPI) List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) listing.Iterator[FederationPolicy] { - - getNextPage := func(ctx context.Context, req ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.servicePrincipalFederationPolicyImpl.List(ctx, req) - } - getItems := func(resp *ListFederationPoliciesResponse) []FederationPolicy { - return resp.Policies - } - getNextReq := func(resp *ListFederationPoliciesResponse) *ListServicePrincipalFederationPoliciesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List service principal federation policies. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ServicePrincipalFederationPolicyAPI) ListAll(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) ([]FederationPolicy, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[FederationPolicy](ctx, iterator) -} - // List service principal federation policies. func (a *ServicePrincipalFederationPolicyAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListFederationPoliciesResponse, error) { - return a.servicePrincipalFederationPolicyImpl.List(ctx, ListServicePrincipalFederationPoliciesRequest{ + return a.servicePrincipalFederationPolicyImpl.internalList(ctx, ListServicePrincipalFederationPoliciesRequest{ ServicePrincipalId: servicePrincipalId, }) } @@ -721,56 +529,13 @@ func (a *ServicePrincipalSecretsAPI) DeleteByServicePrincipalIdAndSecretId(ctx c }) } -// List service principal secrets. -// -// List all secrets associated with the given service principal. 
This operation -// only returns information about the secrets themselves and does not include -// the secret values. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ServicePrincipalSecretsAPI) List(ctx context.Context, request ListServicePrincipalSecretsRequest) listing.Iterator[SecretInfo] { - - getNextPage := func(ctx context.Context, req ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.servicePrincipalSecretsImpl.List(ctx, req) - } - getItems := func(resp *ListServicePrincipalSecretsResponse) []SecretInfo { - return resp.Secrets - } - getNextReq := func(resp *ListServicePrincipalSecretsResponse) *ListServicePrincipalSecretsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List service principal secrets. -// -// List all secrets associated with the given service principal. This operation -// only returns information about the secrets themselves and does not include -// the secret values. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ServicePrincipalSecretsAPI) ListAll(ctx context.Context, request ListServicePrincipalSecretsRequest) ([]SecretInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[SecretInfo](ctx, iterator) -} - // List service principal secrets. // // List all secrets associated with the given service principal. This operation // only returns information about the secrets themselves and does not include // the secret values. 
func (a *ServicePrincipalSecretsAPI) ListByServicePrincipalId(ctx context.Context, servicePrincipalId int64) (*ListServicePrincipalSecretsResponse, error) { - return a.servicePrincipalSecretsImpl.List(ctx, ListServicePrincipalSecretsRequest{ + return a.servicePrincipalSecretsImpl.internalList(ctx, ListServicePrincipalSecretsRequest{ ServicePrincipalId: servicePrincipalId, }) } diff --git a/service/oauth2/impl.go b/service/oauth2/impl.go index d8db890dc..b16f7b7a1 100755 --- a/service/oauth2/impl.go +++ b/service/oauth2/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" "golang.org/x/exp/slices" ) @@ -50,7 +52,38 @@ func (a *accountFederationPolicyImpl) Get(ctx context.Context, request GetAccoun return &federationPolicy, err } -func (a *accountFederationPolicyImpl) List(ctx context.Context, request ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { +// List account federation policies. +func (a *accountFederationPolicyImpl) List(ctx context.Context, request ListAccountFederationPoliciesRequest) listing.Iterator[FederationPolicy] { + + getNextPage := func(ctx context.Context, req ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFederationPoliciesResponse) []FederationPolicy { + return resp.Policies + } + getNextReq := func(resp *ListFederationPoliciesResponse) *ListAccountFederationPoliciesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List account federation policies. 
+func (a *accountFederationPolicyImpl) ListAll(ctx context.Context, request ListAccountFederationPoliciesRequest) ([]FederationPolicy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FederationPolicy](ctx, iterator) +} + +func (a *accountFederationPolicyImpl) internalList(ctx context.Context, request ListAccountFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { var listFederationPoliciesResponse ListFederationPoliciesResponse path := fmt.Sprintf("/api/2.0/accounts/%v/federationPolicies", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -110,7 +143,44 @@ func (a *customAppIntegrationImpl) Get(ctx context.Context, request GetCustomApp return &getCustomAppIntegrationOutput, err } -func (a *customAppIntegrationImpl) List(ctx context.Context, request ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { +// Get custom oauth app integrations. +// +// Get the list of custom OAuth app integrations for the specified Databricks +// account +func (a *customAppIntegrationImpl) List(ctx context.Context, request ListCustomAppIntegrationsRequest) listing.Iterator[GetCustomAppIntegrationOutput] { + + getNextPage := func(ctx context.Context, req ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetCustomAppIntegrationsOutput) []GetCustomAppIntegrationOutput { + return resp.Apps + } + getNextReq := func(resp *GetCustomAppIntegrationsOutput) *ListCustomAppIntegrationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get custom oauth app integrations. 
+// +// Get the list of custom OAuth app integrations for the specified Databricks +// account +func (a *customAppIntegrationImpl) ListAll(ctx context.Context, request ListCustomAppIntegrationsRequest) ([]GetCustomAppIntegrationOutput, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[GetCustomAppIntegrationOutput](ctx, iterator) +} + +func (a *customAppIntegrationImpl) internalList(ctx context.Context, request ListCustomAppIntegrationsRequest) (*GetCustomAppIntegrationsOutput, error) { var getCustomAppIntegrationsOutput GetCustomAppIntegrationsOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/custom-app-integrations", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -136,7 +206,42 @@ type oAuthPublishedAppsImpl struct { client *client.DatabricksClient } -func (a *oAuthPublishedAppsImpl) List(ctx context.Context, request ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { +// Get all the published OAuth apps. +// +// Get all the available published OAuth apps in Databricks. +func (a *oAuthPublishedAppsImpl) List(ctx context.Context, request ListOAuthPublishedAppsRequest) listing.Iterator[PublishedAppOutput] { + + getNextPage := func(ctx context.Context, req ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetPublishedAppsOutput) []PublishedAppOutput { + return resp.Apps + } + getNextReq := func(resp *GetPublishedAppsOutput) *ListOAuthPublishedAppsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get all the published OAuth apps. +// +// Get all the available published OAuth apps in Databricks. 
+func (a *oAuthPublishedAppsImpl) ListAll(ctx context.Context, request ListOAuthPublishedAppsRequest) ([]PublishedAppOutput, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[PublishedAppOutput](ctx, iterator) +} + +func (a *oAuthPublishedAppsImpl) internalList(ctx context.Context, request ListOAuthPublishedAppsRequest) (*GetPublishedAppsOutput, error) { var getPublishedAppsOutput GetPublishedAppsOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-apps", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -182,7 +287,44 @@ func (a *publishedAppIntegrationImpl) Get(ctx context.Context, request GetPublis return &getPublishedAppIntegrationOutput, err } -func (a *publishedAppIntegrationImpl) List(ctx context.Context, request ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { +// Get published oauth app integrations. +// +// Get the list of published OAuth app integrations for the specified Databricks +// account +func (a *publishedAppIntegrationImpl) List(ctx context.Context, request ListPublishedAppIntegrationsRequest) listing.Iterator[GetPublishedAppIntegrationOutput] { + + getNextPage := func(ctx context.Context, req ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *GetPublishedAppIntegrationsOutput) []GetPublishedAppIntegrationOutput { + return resp.Apps + } + getNextReq := func(resp *GetPublishedAppIntegrationsOutput) *ListPublishedAppIntegrationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get published oauth app integrations. 
+// +// Get the list of published OAuth app integrations for the specified Databricks +// account +func (a *publishedAppIntegrationImpl) ListAll(ctx context.Context, request ListPublishedAppIntegrationsRequest) ([]GetPublishedAppIntegrationOutput, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[GetPublishedAppIntegrationOutput](ctx, iterator) +} + +func (a *publishedAppIntegrationImpl) internalList(ctx context.Context, request ListPublishedAppIntegrationsRequest) (*GetPublishedAppIntegrationsOutput, error) { var getPublishedAppIntegrationsOutput GetPublishedAppIntegrationsOutput path := fmt.Sprintf("/api/2.0/accounts/%v/oauth2/published-app-integrations", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -242,7 +384,38 @@ func (a *servicePrincipalFederationPolicyImpl) Get(ctx context.Context, request return &federationPolicy, err } -func (a *servicePrincipalFederationPolicyImpl) List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { +// List service principal federation policies. +func (a *servicePrincipalFederationPolicyImpl) List(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) listing.Iterator[FederationPolicy] { + + getNextPage := func(ctx context.Context, req ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListFederationPoliciesResponse) []FederationPolicy { + return resp.Policies + } + getNextReq := func(resp *ListFederationPoliciesResponse) *ListServicePrincipalFederationPoliciesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List service principal federation policies. 
+func (a *servicePrincipalFederationPolicyImpl) ListAll(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) ([]FederationPolicy, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[FederationPolicy](ctx, iterator) +} + +func (a *servicePrincipalFederationPolicyImpl) internalList(ctx context.Context, request ListServicePrincipalFederationPoliciesRequest) (*ListFederationPoliciesResponse, error) { var listFederationPoliciesResponse ListFederationPoliciesResponse path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/federationPolicies", a.client.ConfiguredAccountID(), request.ServicePrincipalId) queryParams := make(map[string]any) @@ -290,7 +463,46 @@ func (a *servicePrincipalSecretsImpl) Delete(ctx context.Context, request Delete return err } -func (a *servicePrincipalSecretsImpl) List(ctx context.Context, request ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { +// List service principal secrets. +// +// List all secrets associated with the given service principal. This operation +// only returns information about the secrets themselves and does not include +// the secret values. 
+func (a *servicePrincipalSecretsImpl) List(ctx context.Context, request ListServicePrincipalSecretsRequest) listing.Iterator[SecretInfo] { + + getNextPage := func(ctx context.Context, req ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListServicePrincipalSecretsResponse) []SecretInfo { + return resp.Secrets + } + getNextReq := func(resp *ListServicePrincipalSecretsResponse) *ListServicePrincipalSecretsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List service principal secrets. +// +// List all secrets associated with the given service principal. This operation +// only returns information about the secrets themselves and does not include +// the secret values. +func (a *servicePrincipalSecretsImpl) ListAll(ctx context.Context, request ListServicePrincipalSecretsRequest) ([]SecretInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[SecretInfo](ctx, iterator) +} + +func (a *servicePrincipalSecretsImpl) internalList(ctx context.Context, request ListServicePrincipalSecretsRequest) (*ListServicePrincipalSecretsResponse, error) { var listServicePrincipalSecretsResponse ListServicePrincipalSecretsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/servicePrincipals/%v/credentials/secrets", a.client.ConfiguredAccountID(), request.ServicePrincipalId) queryParams := make(map[string]any) diff --git a/service/pipelines/api.go b/service/pipelines/api.go index 6654fe5bf..be5ff8121 100755 --- a/service/pipelines/api.go +++ b/service/pipelines/api.go @@ -358,95 +358,15 @@ func (a *PipelinesAPI) GetUpdateByPipelineIdAndUpdateId(ctx context.Context, pip }) } -// List pipeline events. 
-// -// Retrieves events for a pipeline. -// -// This method is generated by Databricks SDK Code Generator. -func (a *PipelinesAPI) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent] { - - getNextPage := func(ctx context.Context, req ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.pipelinesImpl.ListPipelineEvents(ctx, req) - } - getItems := func(resp *ListPipelineEventsResponse) []PipelineEvent { - return resp.Events - } - getNextReq := func(resp *ListPipelineEventsResponse) *ListPipelineEventsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List pipeline events. -// -// Retrieves events for a pipeline. -// -// This method is generated by Databricks SDK Code Generator. -func (a *PipelinesAPI) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) { - iterator := a.ListPipelineEvents(ctx, request) - return listing.ToSliceN[PipelineEvent, int](ctx, iterator, request.MaxResults) - -} - // List pipeline events. // // Retrieves events for a pipeline. func (a *PipelinesAPI) ListPipelineEventsByPipelineId(ctx context.Context, pipelineId string) (*ListPipelineEventsResponse, error) { - return a.pipelinesImpl.ListPipelineEvents(ctx, ListPipelineEventsRequest{ + return a.pipelinesImpl.internalListPipelineEvents(ctx, ListPipelineEventsRequest{ PipelineId: pipelineId, }) } -// List pipelines. -// -// Lists pipelines defined in the Delta Live Tables system. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *PipelinesAPI) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] { - - getNextPage := func(ctx context.Context, req ListPipelinesRequest) (*ListPipelinesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.pipelinesImpl.ListPipelines(ctx, req) - } - getItems := func(resp *ListPipelinesResponse) []PipelineStateInfo { - return resp.Statuses - } - getNextReq := func(resp *ListPipelinesResponse) *ListPipelinesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List pipelines. -// -// Lists pipelines defined in the Delta Live Tables system. -// -// This method is generated by Databricks SDK Code Generator. -func (a *PipelinesAPI) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { - iterator := a.ListPipelines(ctx, request) - return listing.ToSliceN[PipelineStateInfo, int](ctx, iterator, request.MaxResults) - -} - // PipelineStateInfoNameToPipelineIdMap calls [PipelinesAPI.ListPipelinesAll] and creates a map of results with [PipelineStateInfo].Name as key and [PipelineStateInfo].PipelineId as value. // // Returns an error if there's more than one [PipelineStateInfo] with the same .Name. 
diff --git a/service/pipelines/impl.go b/service/pipelines/impl.go index d4ad82054..588d752fd 100755 --- a/service/pipelines/impl.go +++ b/service/pipelines/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just Pipelines API methods @@ -76,7 +78,43 @@ func (a *pipelinesImpl) GetUpdate(ctx context.Context, request GetUpdateRequest) return &getUpdateResponse, err } -func (a *pipelinesImpl) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { +// List pipeline events. +// +// Retrieves events for a pipeline. +func (a *pipelinesImpl) ListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) listing.Iterator[PipelineEvent] { + + getNextPage := func(ctx context.Context, req ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListPipelineEvents(ctx, req) + } + getItems := func(resp *ListPipelineEventsResponse) []PipelineEvent { + return resp.Events + } + getNextReq := func(resp *ListPipelineEventsResponse) *ListPipelineEventsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List pipeline events. +// +// Retrieves events for a pipeline. 
+func (a *pipelinesImpl) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) { + iterator := a.ListPipelineEvents(ctx, request) + return listing.ToSliceN[PipelineEvent, int](ctx, iterator, request.MaxResults) + +} + +func (a *pipelinesImpl) internalListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { var listPipelineEventsResponse ListPipelineEventsResponse path := fmt.Sprintf("/api/2.0/pipelines/%v/events", request.PipelineId) queryParams := make(map[string]any) @@ -86,7 +124,43 @@ func (a *pipelinesImpl) ListPipelineEvents(ctx context.Context, request ListPipe return &listPipelineEventsResponse, err } -func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { +// List pipelines. +// +// Lists pipelines defined in the Delta Live Tables system. +func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelinesRequest) listing.Iterator[PipelineStateInfo] { + + getNextPage := func(ctx context.Context, req ListPipelinesRequest) (*ListPipelinesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListPipelines(ctx, req) + } + getItems := func(resp *ListPipelinesResponse) []PipelineStateInfo { + return resp.Statuses + } + getNextReq := func(resp *ListPipelinesResponse) *ListPipelinesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List pipelines. +// +// Lists pipelines defined in the Delta Live Tables system. 
+func (a *pipelinesImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { + iterator := a.ListPipelines(ctx, request) + return listing.ToSliceN[PipelineStateInfo, int](ctx, iterator, request.MaxResults) + +} + +func (a *pipelinesImpl) internalListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { var listPipelinesResponse ListPipelinesResponse path := "/api/2.0/pipelines" queryParams := make(map[string]any) diff --git a/service/serving/api.go b/service/serving/api.go index 72274627a..26b3e65b6 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -369,36 +369,6 @@ func (a *ServingEndpointsAPI) GetPermissionsByServingEndpointId(ctx context.Cont }) } -// Get all serving endpoints. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ServingEndpointsAPI) List(ctx context.Context) listing.Iterator[ServingEndpoint] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListEndpointsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.servingEndpointsImpl.List(ctx) - } - getItems := func(resp *ListEndpointsResponse) []ServingEndpoint { - return resp.Endpoints - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get all serving endpoints. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ServingEndpointsAPI) ListAll(ctx context.Context) ([]ServingEndpoint, error) { - iterator := a.List(ctx) - return listing.ToSlice[ServingEndpoint](ctx, iterator) -} - // Get the latest logs for a served model. // // Retrieves the service logs associated with the provided served model. 
diff --git a/service/serving/impl.go b/service/serving/impl.go index 9eda39ef7..389c796b2 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -10,6 +10,8 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" goauth "golang.org/x/oauth2" ) @@ -110,7 +112,33 @@ func (a *servingEndpointsImpl) HttpRequest(ctx context.Context, request External return &httpRequestResponse, err } -func (a *servingEndpointsImpl) List(ctx context.Context) (*ListEndpointsResponse, error) { +// Get all serving endpoints. +func (a *servingEndpointsImpl) List(ctx context.Context) listing.Iterator[ServingEndpoint] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListEndpointsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListEndpointsResponse) []ServingEndpoint { + return resp.Endpoints + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get all serving endpoints. +func (a *servingEndpointsImpl) ListAll(ctx context.Context) ([]ServingEndpoint, error) { + iterator := a.List(ctx) + return listing.ToSlice[ServingEndpoint](ctx, iterator) +} + +func (a *servingEndpointsImpl) internalList(ctx context.Context) (*ListEndpointsResponse, error) { var listEndpointsResponse ListEndpointsResponse path := "/api/2.0/serving-endpoints" diff --git a/service/serving/model.go b/service/serving/model.go index 1fc96710a..749c6a71b 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -868,8 +868,8 @@ type ExternalModel struct { PalmConfig *PaLmConfig `json:"palm_config,omitempty"` // The name of the provider for the external model. 
Currently, the supported // providers are 'ai21labs', 'anthropic', 'amazon-bedrock', 'cohere', - // 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', and - // 'palm'. + // 'databricks-model-serving', 'google-cloud-vertex-ai', 'openai', 'palm', + // and 'custom'. Provider ExternalModelProvider `json:"provider"` // The task type of the external model. Task string `json:"task"` diff --git a/service/settings/api.go b/service/settings/api.go index a63c584b9..039558db5 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -170,40 +170,6 @@ func (a *AccountIpAccessListsAPI) GetByIpAccessListId(ctx context.Context, ipAcc }) } -// Get access lists. -// -// Gets all IP access lists for the specified account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountIpAccessListsAPI) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*GetIpAccessListsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.accountIpAccessListsImpl.List(ctx) - } - getItems := func(resp *GetIpAccessListsResponse) []IpAccessListInfo { - return resp.IpAccessLists - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get access lists. -// -// Gets all IP access lists for the specified account. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AccountIpAccessListsAPI) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { - iterator := a.List(ctx) - return listing.ToSlice[IpAccessListInfo](ctx, iterator) -} - // IpAccessListInfoLabelToListIdMap calls [AccountIpAccessListsAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. 
@@ -1002,40 +968,6 @@ func (a *IpAccessListsAPI) GetByIpAccessListId(ctx context.Context, ipAccessList }) } -// Get access lists. -// -// Gets all IP access lists for the specified workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *IpAccessListsAPI) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListIpAccessListResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.ipAccessListsImpl.List(ctx) - } - getItems := func(resp *ListIpAccessListResponse) []IpAccessListInfo { - return resp.IpAccessLists - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get access lists. -// -// Gets all IP access lists for the specified workspace. -// -// This method is generated by Databricks SDK Code Generator. -func (a *IpAccessListsAPI) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { - iterator := a.List(ctx) - return listing.ToSlice[IpAccessListInfo](ctx, iterator) -} - // IpAccessListInfoLabelToListIdMap calls [IpAccessListsAPI.ListAll] and creates a map of results with [IpAccessListInfo].Label as key and [IpAccessListInfo].ListId as value. // // Returns an error if there's more than one [IpAccessListInfo] with the same .Label. @@ -1249,89 +1181,11 @@ func (a *NetworkConnectivityAPI) GetPrivateEndpointRuleByNetworkConnectivityConf }) } -// List network connectivity configurations. -// -// Gets an array of network connectivity configurations. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *NetworkConnectivityAPI) ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) listing.Iterator[NetworkConnectivityConfiguration] { - - getNextPage := func(ctx context.Context, req ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.networkConnectivityImpl.ListNetworkConnectivityConfigurations(ctx, req) - } - getItems := func(resp *ListNetworkConnectivityConfigurationsResponse) []NetworkConnectivityConfiguration { - return resp.Items - } - getNextReq := func(resp *ListNetworkConnectivityConfigurationsResponse) *ListNetworkConnectivityConfigurationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List network connectivity configurations. -// -// Gets an array of network connectivity configurations. -// -// This method is generated by Databricks SDK Code Generator. -func (a *NetworkConnectivityAPI) ListNetworkConnectivityConfigurationsAll(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) ([]NetworkConnectivityConfiguration, error) { - iterator := a.ListNetworkConnectivityConfigurations(ctx, request) - return listing.ToSlice[NetworkConnectivityConfiguration](ctx, iterator) -} - -// List private endpoint rules. -// -// Gets an array of private endpoint rules. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *NetworkConnectivityAPI) ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) listing.Iterator[NccAzurePrivateEndpointRule] { - - getNextPage := func(ctx context.Context, req ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.networkConnectivityImpl.ListPrivateEndpointRules(ctx, req) - } - getItems := func(resp *ListNccAzurePrivateEndpointRulesResponse) []NccAzurePrivateEndpointRule { - return resp.Items - } - getNextReq := func(resp *ListNccAzurePrivateEndpointRulesResponse) *ListPrivateEndpointRulesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List private endpoint rules. -// -// Gets an array of private endpoint rules. -// -// This method is generated by Databricks SDK Code Generator. -func (a *NetworkConnectivityAPI) ListPrivateEndpointRulesAll(ctx context.Context, request ListPrivateEndpointRulesRequest) ([]NccAzurePrivateEndpointRule, error) { - iterator := a.ListPrivateEndpointRules(ctx, request) - return listing.ToSlice[NccAzurePrivateEndpointRule](ctx, iterator) -} - // List private endpoint rules. // // Gets an array of private endpoint rules. 
func (a *NetworkConnectivityAPI) ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) { - return a.networkConnectivityImpl.ListPrivateEndpointRules(ctx, ListPrivateEndpointRulesRequest{ + return a.networkConnectivityImpl.internalListPrivateEndpointRules(ctx, ListPrivateEndpointRulesRequest{ NetworkConnectivityConfigId: networkConnectivityConfigId, }) } @@ -1419,45 +1273,6 @@ func (a *NotificationDestinationsAPI) GetById(ctx context.Context, id string) (* }) } -// List notification destinations. -// -// Lists notification destinations. -// -// This method is generated by Databricks SDK Code Generator. -func (a *NotificationDestinationsAPI) List(ctx context.Context, request ListNotificationDestinationsRequest) listing.Iterator[ListNotificationDestinationsResult] { - - getNextPage := func(ctx context.Context, req ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.notificationDestinationsImpl.List(ctx, req) - } - getItems := func(resp *ListNotificationDestinationsResponse) []ListNotificationDestinationsResult { - return resp.Results - } - getNextReq := func(resp *ListNotificationDestinationsResponse) *ListNotificationDestinationsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List notification destinations. -// -// Lists notification destinations. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *NotificationDestinationsAPI) ListAll(ctx context.Context, request ListNotificationDestinationsRequest) ([]ListNotificationDestinationsResult, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ListNotificationDestinationsResult](ctx, iterator) -} - type PersonalComputeInterface interface { // Delete Personal Compute setting. @@ -1884,39 +1699,6 @@ func (a *TokenManagementAPI) GetByTokenId(ctx context.Context, tokenId string) ( }) } -// List all tokens. -// -// Lists all tokens associated with the specified workspace or user. -// -// This method is generated by Databricks SDK Code Generator. -func (a *TokenManagementAPI) List(ctx context.Context, request ListTokenManagementRequest) listing.Iterator[TokenInfo] { - - getNextPage := func(ctx context.Context, req ListTokenManagementRequest) (*ListTokensResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.tokenManagementImpl.List(ctx, req) - } - getItems := func(resp *ListTokensResponse) []TokenInfo { - return resp.TokenInfos - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List all tokens. -// -// Lists all tokens associated with the specified workspace or user. -// -// This method is generated by Databricks SDK Code Generator. -func (a *TokenManagementAPI) ListAll(ctx context.Context, request ListTokenManagementRequest) ([]TokenInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[TokenInfo](ctx, iterator) -} - // TokenInfoCommentToTokenIdMap calls [TokenManagementAPI.ListAll] and creates a map of results with [TokenInfo].Comment as key and [TokenInfo].TokenId as value. // // Returns an error if there's more than one [TokenInfo] with the same .Comment. @@ -2055,40 +1837,6 @@ func (a *TokensAPI) DeleteByTokenId(ctx context.Context, tokenId string) error { }) } -// List tokens. -// -// Lists all the valid tokens for a user-workspace pair. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *TokensAPI) List(ctx context.Context) listing.Iterator[PublicTokenInfo] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListPublicTokensResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.tokensImpl.List(ctx) - } - getItems := func(resp *ListPublicTokensResponse) []PublicTokenInfo { - return resp.TokenInfos - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List tokens. -// -// Lists all the valid tokens for a user-workspace pair. -// -// This method is generated by Databricks SDK Code Generator. -func (a *TokensAPI) ListAll(ctx context.Context) ([]PublicTokenInfo, error) { - iterator := a.List(ctx) - return listing.ToSlice[PublicTokenInfo](ctx, iterator) -} - // PublicTokenInfoCommentToTokenIdMap calls [TokensAPI.ListAll] and creates a map of results with [PublicTokenInfo].Comment as key and [PublicTokenInfo].TokenId as value. // // Returns an error if there's more than one [PublicTokenInfo] with the same .Comment. diff --git a/service/settings/impl.go b/service/settings/impl.go index 77c0312ba..ee2a08181 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just AccountIpAccessLists API methods @@ -46,7 +48,37 @@ func (a *accountIpAccessListsImpl) Get(ctx context.Context, request GetAccountIp return &getIpAccessListResponse, err } -func (a *accountIpAccessListsImpl) List(ctx context.Context) (*GetIpAccessListsResponse, error) { +// Get access lists. +// +// Gets all IP access lists for the specified account. 
+func (a *accountIpAccessListsImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*GetIpAccessListsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *GetIpAccessListsResponse) []IpAccessListInfo { + return resp.IpAccessLists + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get access lists. +// +// Gets all IP access lists for the specified account. +func (a *accountIpAccessListsImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[IpAccessListInfo](ctx, iterator) +} + +func (a *accountIpAccessListsImpl) internalList(ctx context.Context) (*GetIpAccessListsResponse, error) { var getIpAccessListsResponse GetIpAccessListsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists", a.client.ConfiguredAccountID()) @@ -517,7 +549,37 @@ func (a *ipAccessListsImpl) Get(ctx context.Context, request GetIpAccessListRequ return &fetchIpAccessListResponse, err } -func (a *ipAccessListsImpl) List(ctx context.Context) (*ListIpAccessListResponse, error) { +// Get access lists. +// +// Gets all IP access lists for the specified workspace. +func (a *ipAccessListsImpl) List(ctx context.Context) listing.Iterator[IpAccessListInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListIpAccessListResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListIpAccessListResponse) []IpAccessListInfo { + return resp.IpAccessLists + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get access lists. +// +// Gets all IP access lists for the specified workspace. 
+func (a *ipAccessListsImpl) ListAll(ctx context.Context) ([]IpAccessListInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[IpAccessListInfo](ctx, iterator) +} + +func (a *ipAccessListsImpl) internalList(ctx context.Context) (*ListIpAccessListResponse, error) { var listIpAccessListResponse ListIpAccessListResponse path := "/api/2.0/ip-access-lists" @@ -616,7 +678,42 @@ func (a *networkConnectivityImpl) GetPrivateEndpointRule(ctx context.Context, re return &nccAzurePrivateEndpointRule, err } -func (a *networkConnectivityImpl) ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { +// List network connectivity configurations. +// +// Gets an array of network connectivity configurations. +func (a *networkConnectivityImpl) ListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) listing.Iterator[NetworkConnectivityConfiguration] { + + getNextPage := func(ctx context.Context, req ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListNetworkConnectivityConfigurations(ctx, req) + } + getItems := func(resp *ListNetworkConnectivityConfigurationsResponse) []NetworkConnectivityConfiguration { + return resp.Items + } + getNextReq := func(resp *ListNetworkConnectivityConfigurationsResponse) *ListNetworkConnectivityConfigurationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List network connectivity configurations. +// +// Gets an array of network connectivity configurations. 
+func (a *networkConnectivityImpl) ListNetworkConnectivityConfigurationsAll(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) ([]NetworkConnectivityConfiguration, error) { + iterator := a.ListNetworkConnectivityConfigurations(ctx, request) + return listing.ToSlice[NetworkConnectivityConfiguration](ctx, iterator) +} + +func (a *networkConnectivityImpl) internalListNetworkConnectivityConfigurations(ctx context.Context, request ListNetworkConnectivityConfigurationsRequest) (*ListNetworkConnectivityConfigurationsResponse, error) { var listNetworkConnectivityConfigurationsResponse ListNetworkConnectivityConfigurationsResponse path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs", a.client.ConfiguredAccountID()) queryParams := make(map[string]any) @@ -626,7 +723,42 @@ func (a *networkConnectivityImpl) ListNetworkConnectivityConfigurations(ctx cont return &listNetworkConnectivityConfigurationsResponse, err } -func (a *networkConnectivityImpl) ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { +// List private endpoint rules. +// +// Gets an array of private endpoint rules. 
+func (a *networkConnectivityImpl) ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) listing.Iterator[NccAzurePrivateEndpointRule] { + + getNextPage := func(ctx context.Context, req ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListPrivateEndpointRules(ctx, req) + } + getItems := func(resp *ListNccAzurePrivateEndpointRulesResponse) []NccAzurePrivateEndpointRule { + return resp.Items + } + getNextReq := func(resp *ListNccAzurePrivateEndpointRulesResponse) *ListPrivateEndpointRulesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List private endpoint rules. +// +// Gets an array of private endpoint rules. +func (a *networkConnectivityImpl) ListPrivateEndpointRulesAll(ctx context.Context, request ListPrivateEndpointRulesRequest) ([]NccAzurePrivateEndpointRule, error) { + iterator := a.ListPrivateEndpointRules(ctx, request) + return listing.ToSlice[NccAzurePrivateEndpointRule](ctx, iterator) +} + +func (a *networkConnectivityImpl) internalListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) { var listNccAzurePrivateEndpointRulesResponse ListNccAzurePrivateEndpointRulesResponse path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId) queryParams := make(map[string]any) @@ -672,7 +804,42 @@ func (a *notificationDestinationsImpl) Get(ctx context.Context, request GetNotif return ¬ificationDestination, err } -func (a *notificationDestinationsImpl) List(ctx context.Context, request ListNotificationDestinationsRequest) 
(*ListNotificationDestinationsResponse, error) { +// List notification destinations. +// +// Lists notification destinations. +func (a *notificationDestinationsImpl) List(ctx context.Context, request ListNotificationDestinationsRequest) listing.Iterator[ListNotificationDestinationsResult] { + + getNextPage := func(ctx context.Context, req ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListNotificationDestinationsResponse) []ListNotificationDestinationsResult { + return resp.Results + } + getNextReq := func(resp *ListNotificationDestinationsResponse) *ListNotificationDestinationsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List notification destinations. +// +// Lists notification destinations. +func (a *notificationDestinationsImpl) ListAll(ctx context.Context, request ListNotificationDestinationsRequest) ([]ListNotificationDestinationsResult, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListNotificationDestinationsResult](ctx, iterator) +} + +func (a *notificationDestinationsImpl) internalList(ctx context.Context, request ListNotificationDestinationsRequest) (*ListNotificationDestinationsResponse, error) { var listNotificationDestinationsResponse ListNotificationDestinationsResponse path := "/api/2.0/notification-destinations" queryParams := make(map[string]any) @@ -826,7 +993,36 @@ func (a *tokenManagementImpl) GetPermissions(ctx context.Context) (*TokenPermiss return &tokenPermissions, err } -func (a *tokenManagementImpl) List(ctx context.Context, request ListTokenManagementRequest) (*ListTokensResponse, error) { +// List all tokens. 
+// +// Lists all tokens associated with the specified workspace or user. +func (a *tokenManagementImpl) List(ctx context.Context, request ListTokenManagementRequest) listing.Iterator[TokenInfo] { + + getNextPage := func(ctx context.Context, req ListTokenManagementRequest) (*ListTokensResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListTokensResponse) []TokenInfo { + return resp.TokenInfos + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List all tokens. +// +// Lists all tokens associated with the specified workspace or user. +func (a *tokenManagementImpl) ListAll(ctx context.Context, request ListTokenManagementRequest) ([]TokenInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[TokenInfo](ctx, iterator) +} + +func (a *tokenManagementImpl) internalList(ctx context.Context, request ListTokenManagementRequest) (*ListTokensResponse, error) { var listTokensResponse ListTokensResponse path := "/api/2.0/token-management/tokens" queryParams := make(map[string]any) @@ -885,7 +1081,37 @@ func (a *tokensImpl) Delete(ctx context.Context, request RevokeTokenRequest) err return err } -func (a *tokensImpl) List(ctx context.Context) (*ListPublicTokensResponse, error) { +// List tokens. +// +// Lists all the valid tokens for a user-workspace pair. +func (a *tokensImpl) List(ctx context.Context) listing.Iterator[PublicTokenInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListPublicTokensResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListPublicTokensResponse) []PublicTokenInfo { + return resp.TokenInfos + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List tokens. 
+// +// Lists all the valid tokens for a user-workspace pair. +func (a *tokensImpl) ListAll(ctx context.Context) ([]PublicTokenInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[PublicTokenInfo](ctx, iterator) +} + +func (a *tokensImpl) internalList(ctx context.Context) (*ListPublicTokensResponse, error) { var listPublicTokensResponse ListPublicTokensResponse path := "/api/2.0/token/list" diff --git a/service/sharing/api.go b/service/sharing/api.go index 57d1f45e2..5aa866541 100755 --- a/service/sharing/api.go +++ b/service/sharing/api.go @@ -146,51 +146,6 @@ func (a *ProvidersAPI) GetByName(ctx context.Context, name string) (*ProviderInf }) } -// List providers. -// -// Gets an array of available authentication providers. The caller must either -// be a metastore admin or the owner of the providers. Providers not owned by -// the caller are not included in the response. There is no guarantee of a -// specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProvidersAPI) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { - - getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providersImpl.List(ctx, req) - } - getItems := func(resp *ListProvidersResponse) []ProviderInfo { - return resp.Providers - } - getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List providers. -// -// Gets an array of available authentication providers. The caller must either -// be a metastore admin or the owner of the providers. Providers not owned by -// the caller are not included in the response. 
There is no guarantee of a -// specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProvidersAPI) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ProviderInfo](ctx, iterator) -} - // ProviderInfoNameToMetastoreIdMap calls [ProvidersAPI.ListAll] and creates a map of results with [ProviderInfo].Name as key and [ProviderInfo].MetastoreId as value. // // Returns an error if there's more than one [ProviderInfo] with the same .Name. @@ -216,50 +171,13 @@ func (a *ProvidersAPI) ProviderInfoNameToMetastoreIdMap(ctx context.Context, req return mapping, nil } -// List shares by Provider. -// -// Gets an array of a specified provider's shares within the metastore where: -// -// * the caller is a metastore admin, or * the caller is the owner. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ProvidersAPI) ListShares(ctx context.Context, request ListSharesRequest) listing.Iterator[ProviderShare] { - - getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListProviderSharesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.providersImpl.ListShares(ctx, req) - } - getItems := func(resp *ListProviderSharesResponse) []ProviderShare { - return resp.Shares - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List shares by Provider. -// -// Gets an array of a specified provider's shares within the metastore where: -// -// * the caller is a metastore admin, or * the caller is the owner. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *ProvidersAPI) ListSharesAll(ctx context.Context, request ListSharesRequest) ([]ProviderShare, error) { - iterator := a.ListShares(ctx, request) - return listing.ToSlice[ProviderShare](ctx, iterator) -} - // List shares by Provider. // // Gets an array of a specified provider's shares within the metastore where: // // * the caller is a metastore admin, or * the caller is the owner. func (a *ProvidersAPI) ListSharesByName(ctx context.Context, name string) (*ListProviderSharesResponse, error) { - return a.providersImpl.ListShares(ctx, ListSharesRequest{ + return a.providersImpl.internalListShares(ctx, ListSharesRequest{ Name: name, }) } @@ -462,51 +380,6 @@ func (a *RecipientsAPI) GetByName(ctx context.Context, name string) (*RecipientI }) } -// List share recipients. -// -// Gets an array of all share recipients within the current metastore where: -// -// * the caller is a metastore admin, or * the caller is the owner. There is no -// guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *RecipientsAPI) List(ctx context.Context, request ListRecipientsRequest) listing.Iterator[RecipientInfo] { - - getNextPage := func(ctx context.Context, req ListRecipientsRequest) (*ListRecipientsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.recipientsImpl.List(ctx, req) - } - getItems := func(resp *ListRecipientsResponse) []RecipientInfo { - return resp.Recipients - } - getNextReq := func(resp *ListRecipientsResponse) *ListRecipientsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List share recipients. -// -// Gets an array of all share recipients within the current metastore where: -// -// * the caller is a metastore admin, or * the caller is the owner. 
There is no -// guarantee of a specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *RecipientsAPI) ListAll(ctx context.Context, request ListRecipientsRequest) ([]RecipientInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[RecipientInfo](ctx, iterator) -} - // Get recipient share permissions. // // Gets the share permissions for the specified Recipient. The caller must be a @@ -649,49 +522,6 @@ func (a *SharesAPI) GetByName(ctx context.Context, name string) (*ShareInfo, err }) } -// List shares. -// -// Gets an array of data object shares from the metastore. The caller must be a -// metastore admin or the owner of the share. There is no guarantee of a -// specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SharesAPI) List(ctx context.Context, request ListSharesRequest) listing.Iterator[ShareInfo] { - - getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListSharesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.sharesImpl.List(ctx, req) - } - getItems := func(resp *ListSharesResponse) []ShareInfo { - return resp.Shares - } - getNextReq := func(resp *ListSharesResponse) *ListSharesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List shares. -// -// Gets an array of data object shares from the metastore. The caller must be a -// metastore admin or the owner of the share. There is no guarantee of a -// specific ordering of the elements in the array. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *SharesAPI) ListAll(ctx context.Context, request ListSharesRequest) ([]ShareInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ShareInfo](ctx, iterator) -} - // Get permissions. // // Gets the permissions for a data share from the metastore. The caller must be diff --git a/service/sharing/impl.go b/service/sharing/impl.go index e6b6d33ec..a3d8230ab 100755 --- a/service/sharing/impl.go +++ b/service/sharing/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" "golang.org/x/exp/slices" "github.com/databricks/databricks-sdk-go/service/catalog" @@ -49,7 +51,50 @@ func (a *providersImpl) Get(ctx context.Context, request GetProviderRequest) (*P return &providerInfo, err } -func (a *providersImpl) List(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { +// List providers. +// +// Gets an array of available authentication providers. The caller must either +// be a metastore admin or the owner of the providers. Providers not owned by +// the caller are not included in the response. There is no guarantee of a +// specific ordering of the elements in the array. 
+func (a *providersImpl) List(ctx context.Context, request ListProvidersRequest) listing.Iterator[ProviderInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListProvidersRequest) (*ListProvidersResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListProvidersResponse) []ProviderInfo { + return resp.Providers + } + getNextReq := func(resp *ListProvidersResponse) *ListProvidersRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List providers. +// +// Gets an array of available authentication providers. The caller must either +// be a metastore admin or the owner of the providers. Providers not owned by +// the caller are not included in the response. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *providersImpl) ListAll(ctx context.Context, request ListProvidersRequest) ([]ProviderInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ProviderInfo](ctx, iterator) +} + +func (a *providersImpl) internalList(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) { var listProvidersResponse ListProvidersResponse path := "/api/2.1/unity-catalog/providers" queryParams := make(map[string]any) @@ -59,7 +104,48 @@ func (a *providersImpl) List(ctx context.Context, request ListProvidersRequest) return &listProvidersResponse, err } -func (a *providersImpl) ListShares(ctx context.Context, request ListSharesRequest) (*ListProviderSharesResponse, error) { +// List shares by Provider. +// +// Gets an array of a specified provider's shares within the metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. 
+func (a *providersImpl) ListShares(ctx context.Context, request ListSharesRequest) listing.Iterator[ProviderShare] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListProviderSharesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListShares(ctx, req) + } + getItems := func(resp *ListProviderSharesResponse) []ProviderShare { + return resp.Shares + } + getNextReq := func(resp *ListProviderSharesResponse) *ListSharesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List shares by Provider. +// +// Gets an array of a specified provider's shares within the metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. +func (a *providersImpl) ListSharesAll(ctx context.Context, request ListSharesRequest) ([]ProviderShare, error) { + iterator := a.ListShares(ctx, request) + return listing.ToSlice[ProviderShare](ctx, iterator) +} + +func (a *providersImpl) internalListShares(ctx context.Context, request ListSharesRequest) (*ListProviderSharesResponse, error) { var listProviderSharesResponse ListProviderSharesResponse path := fmt.Sprintf("/api/2.1/unity-catalog/providers/%v/shares", request.Name) queryParams := make(map[string]any) @@ -141,7 +227,50 @@ func (a *recipientsImpl) Get(ctx context.Context, request GetRecipientRequest) ( return &recipientInfo, err } -func (a *recipientsImpl) List(ctx context.Context, request ListRecipientsRequest) (*ListRecipientsResponse, error) { +// List share recipients. +// +// Gets an array of all share recipients within the current metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. 
There is no +// guarantee of a specific ordering of the elements in the array. +func (a *recipientsImpl) List(ctx context.Context, request ListRecipientsRequest) listing.Iterator[RecipientInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListRecipientsRequest) (*ListRecipientsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListRecipientsResponse) []RecipientInfo { + return resp.Recipients + } + getNextReq := func(resp *ListRecipientsResponse) *ListRecipientsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List share recipients. +// +// Gets an array of all share recipients within the current metastore where: +// +// * the caller is a metastore admin, or * the caller is the owner. There is no +// guarantee of a specific ordering of the elements in the array. +func (a *recipientsImpl) ListAll(ctx context.Context, request ListRecipientsRequest) ([]RecipientInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[RecipientInfo](ctx, iterator) +} + +func (a *recipientsImpl) internalList(ctx context.Context, request ListRecipientsRequest) (*ListRecipientsResponse, error) { var listRecipientsResponse ListRecipientsResponse path := "/api/2.1/unity-catalog/recipients" queryParams := make(map[string]any) @@ -219,7 +348,48 @@ func (a *sharesImpl) Get(ctx context.Context, request GetShareRequest) (*ShareIn return &shareInfo, err } -func (a *sharesImpl) List(ctx context.Context, request ListSharesRequest) (*ListSharesResponse, error) { +// List shares. +// +// Gets an array of data object shares from the metastore. The caller must be a +// metastore admin or the owner of the share. 
There is no guarantee of a +// specific ordering of the elements in the array. +func (a *sharesImpl) List(ctx context.Context, request ListSharesRequest) listing.Iterator[ShareInfo] { + + request.ForceSendFields = append(request.ForceSendFields, "MaxResults") + + getNextPage := func(ctx context.Context, req ListSharesRequest) (*ListSharesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListSharesResponse) []ShareInfo { + return resp.Shares + } + getNextReq := func(resp *ListSharesResponse) *ListSharesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List shares. +// +// Gets an array of data object shares from the metastore. The caller must be a +// metastore admin or the owner of the share. There is no guarantee of a +// specific ordering of the elements in the array. +func (a *sharesImpl) ListAll(ctx context.Context, request ListSharesRequest) ([]ShareInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ShareInfo](ctx, iterator) +} + +func (a *sharesImpl) internalList(ctx context.Context, request ListSharesRequest) (*ListSharesResponse, error) { var listSharesResponse ListSharesResponse path := "/api/2.1/unity-catalog/shares" queryParams := make(map[string]any) diff --git a/service/sharing/interface.go b/service/sharing/interface.go index ffbf87b16..3e74d4694 100755 --- a/service/sharing/interface.go +++ b/service/sharing/interface.go @@ -49,7 +49,7 @@ type ProvidersService interface { // // * the caller is a metastore admin, or * the caller is the owner. // - // Use ListSharesAll() to get all ProviderShare instances + // Use ListSharesAll() to get all ProviderShare instances, which will iterate over every result page. 
ListShares(ctx context.Context, request ListSharesRequest) (*ListProviderSharesResponse, error) // Update a provider. diff --git a/service/sql/api.go b/service/sql/api.go index e3a899a35..409dc26c3 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -124,49 +124,6 @@ func (a *AlertsAPI) GetById(ctx context.Context, id string) (*Alert, error) { }) } -// List alerts. -// -// Gets a list of alerts accessible to the user, ordered by creation time. -// **Warning:** Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// This method is generated by Databricks SDK Code Generator. -func (a *AlertsAPI) List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] { - - getNextPage := func(ctx context.Context, req ListAlertsRequest) (*ListAlertsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.alertsImpl.List(ctx, req) - } - getItems := func(resp *ListAlertsResponse) []ListAlertsResponseAlert { - return resp.Results - } - getNextReq := func(resp *ListAlertsResponse) *ListAlertsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List alerts. -// -// Gets a list of alerts accessible to the user, ordered by creation time. -// **Warning:** Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *AlertsAPI) ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ListAlertsResponseAlert](ctx, iterator) -} - // ListAlertsResponseAlertDisplayNameToIdMap calls [AlertsAPI.ListAll] and creates a map of results with [ListAlertsResponseAlert].DisplayName as key and [ListAlertsResponseAlert].Id as value. // // Returns an error if there's more than one [ListAlertsResponseAlert] with the same .DisplayName. @@ -578,59 +535,6 @@ func (a *DashboardsAPI) GetByDashboardId(ctx context.Context, dashboardId string }) } -// Get dashboard objects. -// -// Fetch a paginated list of dashboard objects. -// -// **Warning**: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// This method is generated by Databricks SDK Code Generator. -func (a *DashboardsAPI) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { - - request.Page = 1 // start iterating from the first page - - getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.dashboardsImpl.List(ctx, req) - } - getItems := func(resp *ListResponse) []Dashboard { - return resp.Results - } - getNextReq := func(resp *ListResponse) *ListDashboardsRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.Page = resp.Page + 1 - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[Dashboard, string]( - iterator, - func(item Dashboard) string { - return item.Id - }) - return dedupedIterator -} - -// Get dashboard objects. -// -// Fetch a paginated list of dashboard objects. 
-// -// **Warning**: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// This method is generated by Databricks SDK Code Generator. -func (a *DashboardsAPI) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[Dashboard, int](ctx, iterator, request.PageSize) - -} - // DashboardNameToIdMap calls [DashboardsAPI.ListAll] and creates a map of results with [Dashboard].Name as key and [Dashboard].Id as value. // // Returns an error if there's more than one [Dashboard] with the same .Name. @@ -1022,49 +926,6 @@ func (a *QueriesAPI) GetById(ctx context.Context, id string) (*Query, error) { }) } -// List queries. -// -// Gets a list of queries accessible to the user, ordered by creation time. -// **Warning:** Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// This method is generated by Databricks SDK Code Generator. -func (a *QueriesAPI) List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] { - - getNextPage := func(ctx context.Context, req ListQueriesRequest) (*ListQueryObjectsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.queriesImpl.List(ctx, req) - } - getItems := func(resp *ListQueryObjectsResponse) []ListQueryObjectsResponseQuery { - return resp.Results - } - getNextReq := func(resp *ListQueryObjectsResponse) *ListQueriesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List queries. -// -// Gets a list of queries accessible to the user, ordered by creation time. 
-// **Warning:** Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// This method is generated by Databricks SDK Code Generator. -func (a *QueriesAPI) ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ListQueryObjectsResponseQuery](ctx, iterator) -} - // ListQueryObjectsResponseQueryDisplayNameToIdMap calls [QueriesAPI.ListAll] and creates a map of results with [ListQueryObjectsResponseQuery].DisplayName as key and [ListQueryObjectsResponseQuery].Id as value. // // Returns an error if there's more than one [ListQueryObjectsResponseQuery] with the same .DisplayName. @@ -1118,50 +979,11 @@ func (a *QueriesAPI) GetByDisplayName(ctx context.Context, name string) (*ListQu return &alternatives[0], nil } -// List visualizations on a query. -// -// Gets a list of visualizations on a query. -// -// This method is generated by Databricks SDK Code Generator. -func (a *QueriesAPI) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] { - - getNextPage := func(ctx context.Context, req ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.queriesImpl.ListVisualizations(ctx, req) - } - getItems := func(resp *ListVisualizationsForQueryResponse) []Visualization { - return resp.Results - } - getNextReq := func(resp *ListVisualizationsForQueryResponse) *ListVisualizationsForQueryRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List visualizations on a query. -// -// Gets a list of visualizations on a query. 
-// -// This method is generated by Databricks SDK Code Generator. -func (a *QueriesAPI) ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) { - iterator := a.ListVisualizations(ctx, request) - return listing.ToSlice[Visualization](ctx, iterator) -} - // List visualizations on a query. // // Gets a list of visualizations on a query. func (a *QueriesAPI) ListVisualizationsById(ctx context.Context, id string) (*ListVisualizationsForQueryResponse, error) { - return a.queriesImpl.ListVisualizations(ctx, ListVisualizationsForQueryRequest{ + return a.queriesImpl.internalListVisualizations(ctx, ListVisualizationsForQueryRequest{ Id: id, }) } @@ -1358,71 +1180,6 @@ func (a *QueriesLegacyAPI) GetByQueryId(ctx context.Context, queryId string) (*L }) } -// Get a list of queries. -// -// Gets a list of queries. Optionally, this list can be filtered by a search -// term. -// -// **Warning**: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// **Note**: A new version of the Databricks SQL API is now available. Please -// use :method:queries/list instead. [Learn more] -// -// This method is generated by Databricks SDK Code Generator. 
-// -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *QueriesLegacyAPI) List(ctx context.Context, request ListQueriesLegacyRequest) listing.Iterator[LegacyQuery] { - - request.Page = 1 // start iterating from the first page - - getNextPage := func(ctx context.Context, req ListQueriesLegacyRequest) (*QueryList, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.queriesLegacyImpl.List(ctx, req) - } - getItems := func(resp *QueryList) []LegacyQuery { - return resp.Results - } - getNextReq := func(resp *QueryList) *ListQueriesLegacyRequest { - if len(getItems(resp)) == 0 { - return nil - } - request.Page = resp.Page + 1 - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - dedupedIterator := listing.NewDedupeIterator[LegacyQuery, string]( - iterator, - func(item LegacyQuery) string { - return item.Id - }) - return dedupedIterator -} - -// Get a list of queries. -// -// Gets a list of queries. Optionally, this list can be filtered by a search -// term. -// -// **Warning**: Calling this API concurrently 10 or more times could result in -// throttling, service degradation, or a temporary ban. -// -// **Note**: A new version of the Databricks SQL API is now available. Please -// use :method:queries/list instead. [Learn more] -// -// This method is generated by Databricks SDK Code Generator. -// -// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html -func (a *QueriesLegacyAPI) ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error) { - iterator := a.List(ctx, request) - return listing.ToSliceN[LegacyQuery, int](ctx, iterator, request.PageSize) - -} - // LegacyQueryNameToIdMap calls [QueriesLegacyAPI.ListAll] and creates a map of results with [LegacyQuery].Name as key and [LegacyQuery].Id as value. // // Returns an error if there's more than one [LegacyQuery] with the same .Name. 
@@ -2276,39 +2033,6 @@ func (a *WarehousesAPI) GetPermissionsByWarehouseId(ctx context.Context, warehou }) } -// List warehouses. -// -// Lists all SQL warehouses that a user has manager permissions on. -// -// This method is generated by Databricks SDK Code Generator. -func (a *WarehousesAPI) List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo] { - - getNextPage := func(ctx context.Context, req ListWarehousesRequest) (*ListWarehousesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.warehousesImpl.List(ctx, req) - } - getItems := func(resp *ListWarehousesResponse) []EndpointInfo { - return resp.Warehouses - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List warehouses. -// -// Lists all SQL warehouses that a user has manager permissions on. -// -// This method is generated by Databricks SDK Code Generator. -func (a *WarehousesAPI) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[EndpointInfo](ctx, iterator) -} - // EndpointInfoNameToIdMap calls [WarehousesAPI.ListAll] and creates a map of results with [EndpointInfo].Name as key and [EndpointInfo].Id as value. // // Returns an error if there's more than one [EndpointInfo] with the same .Name. 
diff --git a/service/sql/impl.go b/service/sql/impl.go index 6dd38e9c9..e5f404388 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just Alerts API methods @@ -46,7 +48,46 @@ func (a *alertsImpl) Get(ctx context.Context, request GetAlertRequest) (*Alert, return &alert, err } -func (a *alertsImpl) List(ctx context.Context, request ListAlertsRequest) (*ListAlertsResponse, error) { +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *alertsImpl) List(ctx context.Context, request ListAlertsRequest) listing.Iterator[ListAlertsResponseAlert] { + + getNextPage := func(ctx context.Context, req ListAlertsRequest) (*ListAlertsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListAlertsResponse) []ListAlertsResponseAlert { + return resp.Results + } + getNextReq := func(resp *ListAlertsResponse) *ListAlertsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. 
+func (a *alertsImpl) ListAll(ctx context.Context, request ListAlertsRequest) ([]ListAlertsResponseAlert, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListAlertsResponseAlert](ctx, iterator) +} + +func (a *alertsImpl) internalList(ctx context.Context, request ListAlertsRequest) (*ListAlertsResponse, error) { var listAlertsResponse ListAlertsResponse path := "/api/2.0/sql/alerts" queryParams := make(map[string]any) @@ -197,7 +238,56 @@ func (a *dashboardsImpl) Get(ctx context.Context, request GetDashboardRequest) ( return &dashboard, err } -func (a *dashboardsImpl) List(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error) { +// Get dashboard objects. +// +// Fetch a paginated list of dashboard objects. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *dashboardsImpl) List(ctx context.Context, request ListDashboardsRequest) listing.Iterator[Dashboard] { + + request.Page = 1 // start iterating from the first page + + getNextPage := func(ctx context.Context, req ListDashboardsRequest) (*ListResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListResponse) []Dashboard { + return resp.Results + } + getNextReq := func(resp *ListResponse) *ListDashboardsRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.Page = resp.Page + 1 + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[Dashboard, string]( + iterator, + func(item Dashboard) string { + return item.Id + }) + return dedupedIterator +} + +// Get dashboard objects. +// +// Fetch a paginated list of dashboard objects. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. 
+func (a *dashboardsImpl) ListAll(ctx context.Context, request ListDashboardsRequest) ([]Dashboard, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[Dashboard, int](ctx, iterator, request.PageSize) + +} + +func (a *dashboardsImpl) internalList(ctx context.Context, request ListDashboardsRequest) (*ListResponse, error) { var listResponse ListResponse path := "/api/2.0/preview/sql/dashboards" queryParams := make(map[string]any) @@ -316,7 +406,46 @@ func (a *queriesImpl) Get(ctx context.Context, request GetQueryRequest) (*Query, return &query, err } -func (a *queriesImpl) List(ctx context.Context, request ListQueriesRequest) (*ListQueryObjectsResponse, error) { +// List queries. +// +// Gets a list of queries accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +func (a *queriesImpl) List(ctx context.Context, request ListQueriesRequest) listing.Iterator[ListQueryObjectsResponseQuery] { + + getNextPage := func(ctx context.Context, req ListQueriesRequest) (*ListQueryObjectsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListQueryObjectsResponse) []ListQueryObjectsResponseQuery { + return resp.Results + } + getNextReq := func(resp *ListQueryObjectsResponse) *ListQueriesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List queries. +// +// Gets a list of queries accessible to the user, ordered by creation time. +// **Warning:** Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. 
+func (a *queriesImpl) ListAll(ctx context.Context, request ListQueriesRequest) ([]ListQueryObjectsResponseQuery, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ListQueryObjectsResponseQuery](ctx, iterator) +} + +func (a *queriesImpl) internalList(ctx context.Context, request ListQueriesRequest) (*ListQueryObjectsResponse, error) { var listQueryObjectsResponse ListQueryObjectsResponse path := "/api/2.0/sql/queries" queryParams := make(map[string]any) @@ -326,7 +455,42 @@ func (a *queriesImpl) List(ctx context.Context, request ListQueriesRequest) (*Li return &listQueryObjectsResponse, err } -func (a *queriesImpl) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { +// List visualizations on a query. +// +// Gets a list of visualizations on a query. +func (a *queriesImpl) ListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) listing.Iterator[Visualization] { + + getNextPage := func(ctx context.Context, req ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListVisualizations(ctx, req) + } + getItems := func(resp *ListVisualizationsForQueryResponse) []Visualization { + return resp.Results + } + getNextReq := func(resp *ListVisualizationsForQueryResponse) *ListVisualizationsForQueryRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List visualizations on a query. +// +// Gets a list of visualizations on a query. 
+func (a *queriesImpl) ListVisualizationsAll(ctx context.Context, request ListVisualizationsForQueryRequest) ([]Visualization, error) { + iterator := a.ListVisualizations(ctx, request) + return listing.ToSlice[Visualization](ctx, iterator) +} + +func (a *queriesImpl) internalListVisualizations(ctx context.Context, request ListVisualizationsForQueryRequest) (*ListVisualizationsForQueryResponse, error) { var listVisualizationsForQueryResponse ListVisualizationsForQueryResponse path := fmt.Sprintf("/api/2.0/sql/queries/%v/visualizations", request.Id) queryParams := make(map[string]any) @@ -383,7 +547,68 @@ func (a *queriesLegacyImpl) Get(ctx context.Context, request GetQueriesLegacyReq return &legacyQuery, err } -func (a *queriesLegacyImpl) List(ctx context.Context, request ListQueriesLegacyRequest) (*QueryList, error) { +// Get a list of queries. +// +// Gets a list of queries. Optionally, this list can be filtered by a search +// term. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/list instead. 
[Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *queriesLegacyImpl) List(ctx context.Context, request ListQueriesLegacyRequest) listing.Iterator[LegacyQuery] { + + request.Page = 1 // start iterating from the first page + + getNextPage := func(ctx context.Context, req ListQueriesLegacyRequest) (*QueryList, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *QueryList) []LegacyQuery { + return resp.Results + } + getNextReq := func(resp *QueryList) *ListQueriesLegacyRequest { + if len(getItems(resp)) == 0 { + return nil + } + request.Page = resp.Page + 1 + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + dedupedIterator := listing.NewDedupeIterator[LegacyQuery, string]( + iterator, + func(item LegacyQuery) string { + return item.Id + }) + return dedupedIterator +} + +// Get a list of queries. +// +// Gets a list of queries. Optionally, this list can be filtered by a search +// term. +// +// **Warning**: Calling this API concurrently 10 or more times could result in +// throttling, service degradation, or a temporary ban. +// +// **Note**: A new version of the Databricks SQL API is now available. Please +// use :method:queries/list instead. 
[Learn more] +// +// [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html +func (a *queriesLegacyImpl) ListAll(ctx context.Context, request ListQueriesLegacyRequest) ([]LegacyQuery, error) { + iterator := a.List(ctx, request) + return listing.ToSliceN[LegacyQuery, int](ctx, iterator, request.PageSize) + +} + +func (a *queriesLegacyImpl) internalList(ctx context.Context, request ListQueriesLegacyRequest) (*QueryList, error) { var queryList QueryList path := "/api/2.0/preview/sql/queries" queryParams := make(map[string]any) @@ -640,7 +865,36 @@ func (a *warehousesImpl) GetWorkspaceWarehouseConfig(ctx context.Context) (*GetW return &getWorkspaceWarehouseConfigResponse, err } -func (a *warehousesImpl) List(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error) { +// List warehouses. +// +// Lists all SQL warehouses that a user has manager permissions on. +func (a *warehousesImpl) List(ctx context.Context, request ListWarehousesRequest) listing.Iterator[EndpointInfo] { + + getNextPage := func(ctx context.Context, req ListWarehousesRequest) (*ListWarehousesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListWarehousesResponse) []EndpointInfo { + return resp.Warehouses + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List warehouses. +// +// Lists all SQL warehouses that a user has manager permissions on. 
+func (a *warehousesImpl) ListAll(ctx context.Context, request ListWarehousesRequest) ([]EndpointInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[EndpointInfo](ctx, iterator) +} + +func (a *warehousesImpl) internalList(ctx context.Context, request ListWarehousesRequest) (*ListWarehousesResponse, error) { var listWarehousesResponse ListWarehousesResponse path := "/api/2.0/sql/warehouses" queryParams := make(map[string]any) diff --git a/service/vectorsearch/api.go b/service/vectorsearch/api.go index f4371301a..c0e3af2fb 100755 --- a/service/vectorsearch/api.go +++ b/service/vectorsearch/api.go @@ -186,41 +186,6 @@ func (a *VectorSearchEndpointsAPI) GetEndpointByEndpointName(ctx context.Context }) } -// List all endpoints. -// -// This method is generated by Databricks SDK Code Generator. -func (a *VectorSearchEndpointsAPI) ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] { - - getNextPage := func(ctx context.Context, req ListEndpointsRequest) (*ListEndpointResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.vectorSearchEndpointsImpl.ListEndpoints(ctx, req) - } - getItems := func(resp *ListEndpointResponse) []EndpointInfo { - return resp.Endpoints - } - getNextReq := func(resp *ListEndpointResponse) *ListEndpointsRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List all endpoints. -// -// This method is generated by Databricks SDK Code Generator. -func (a *VectorSearchEndpointsAPI) ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) { - iterator := a.ListEndpoints(ctx, request) - return listing.ToSlice[EndpointInfo](ctx, iterator) -} - type VectorSearchIndexesInterface interface { // Create an index. 
@@ -334,42 +299,3 @@ func (a *VectorSearchIndexesAPI) GetIndexByIndexName(ctx context.Context, indexN IndexName: indexName, }) } - -// List indexes. -// -// List all indexes in the given endpoint. -// -// This method is generated by Databricks SDK Code Generator. -func (a *VectorSearchIndexesAPI) ListIndexes(ctx context.Context, request ListIndexesRequest) listing.Iterator[MiniVectorIndex] { - - getNextPage := func(ctx context.Context, req ListIndexesRequest) (*ListVectorIndexesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.vectorSearchIndexesImpl.ListIndexes(ctx, req) - } - getItems := func(resp *ListVectorIndexesResponse) []MiniVectorIndex { - return resp.VectorIndexes - } - getNextReq := func(resp *ListVectorIndexesResponse) *ListIndexesRequest { - if resp.NextPageToken == "" { - return nil - } - request.PageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// List indexes. -// -// List all indexes in the given endpoint. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *VectorSearchIndexesAPI) ListIndexesAll(ctx context.Context, request ListIndexesRequest) ([]MiniVectorIndex, error) { - iterator := a.ListIndexes(ctx, request) - return listing.ToSlice[MiniVectorIndex](ctx, iterator) -} diff --git a/service/vectorsearch/impl.go b/service/vectorsearch/impl.go index 1e69a9afe..1deb7eea3 100755 --- a/service/vectorsearch/impl.go +++ b/service/vectorsearch/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just VectorSearchEndpoints API methods @@ -45,7 +47,38 @@ func (a *vectorSearchEndpointsImpl) GetEndpoint(ctx context.Context, request Get return &endpointInfo, err } -func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) { +// List all endpoints. +func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] { + + getNextPage := func(ctx context.Context, req ListEndpointsRequest) (*ListEndpointResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListEndpoints(ctx, req) + } + getItems := func(resp *ListEndpointResponse) []EndpointInfo { + return resp.Endpoints + } + getNextReq := func(resp *ListEndpointResponse) *ListEndpointsRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List all endpoints. 
+func (a *vectorSearchEndpointsImpl) ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) { + iterator := a.ListEndpoints(ctx, request) + return listing.ToSlice[EndpointInfo](ctx, iterator) +} + +func (a *vectorSearchEndpointsImpl) internalListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) { var listEndpointResponse ListEndpointResponse path := "/api/2.0/vector-search/endpoints" queryParams := make(map[string]any) @@ -101,7 +134,42 @@ func (a *vectorSearchIndexesImpl) GetIndex(ctx context.Context, request GetIndex return &vectorIndex, err } -func (a *vectorSearchIndexesImpl) ListIndexes(ctx context.Context, request ListIndexesRequest) (*ListVectorIndexesResponse, error) { +// List indexes. +// +// List all indexes in the given endpoint. +func (a *vectorSearchIndexesImpl) ListIndexes(ctx context.Context, request ListIndexesRequest) listing.Iterator[MiniVectorIndex] { + + getNextPage := func(ctx context.Context, req ListIndexesRequest) (*ListVectorIndexesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListIndexes(ctx, req) + } + getItems := func(resp *ListVectorIndexesResponse) []MiniVectorIndex { + return resp.VectorIndexes + } + getNextReq := func(resp *ListVectorIndexesResponse) *ListIndexesRequest { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List indexes. +// +// List all indexes in the given endpoint. 
+func (a *vectorSearchIndexesImpl) ListIndexesAll(ctx context.Context, request ListIndexesRequest) ([]MiniVectorIndex, error) { + iterator := a.ListIndexes(ctx, request) + return listing.ToSlice[MiniVectorIndex](ctx, iterator) +} + +func (a *vectorSearchIndexesImpl) internalListIndexes(ctx context.Context, request ListIndexesRequest) (*ListVectorIndexesResponse, error) { var listVectorIndexesResponse ListVectorIndexesResponse path := "/api/2.0/vector-search/indexes" queryParams := make(map[string]any) diff --git a/service/workspace/api.go b/service/workspace/api.go index 59eac85ec..b279c4a70 100755 --- a/service/workspace/api.go +++ b/service/workspace/api.go @@ -118,42 +118,6 @@ func (a *GitCredentialsAPI) GetByCredentialId(ctx context.Context, credentialId }) } -// Get Git credentials. -// -// Lists the calling user's Git credentials. One credential per user is -// supported. -// -// This method is generated by Databricks SDK Code Generator. -func (a *GitCredentialsAPI) List(ctx context.Context) listing.Iterator[CredentialInfo] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListCredentialsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.gitCredentialsImpl.List(ctx) - } - getItems := func(resp *ListCredentialsResponse) []CredentialInfo { - return resp.Credentials - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Get Git credentials. -// -// Lists the calling user's Git credentials. One credential per user is -// supported. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *GitCredentialsAPI) ListAll(ctx context.Context) ([]CredentialInfo, error) { - iterator := a.List(ctx) - return listing.ToSlice[CredentialInfo](ctx, iterator) -} - // CredentialInfoGitProviderToCredentialIdMap calls [GitCredentialsAPI.ListAll] and creates a map of results with [CredentialInfo].GitProvider as key and [CredentialInfo].CredentialId as value. // // Returns an error if there's more than one [CredentialInfo] with the same .GitProvider. @@ -371,47 +335,6 @@ func (a *ReposAPI) GetPermissionsByRepoId(ctx context.Context, repoId string) (* }) } -// Get repos. -// -// Returns repos that the calling user has Manage permissions on. Use -// `next_page_token` to iterate through additional pages. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ReposAPI) List(ctx context.Context, request ListReposRequest) listing.Iterator[RepoInfo] { - - getNextPage := func(ctx context.Context, req ListReposRequest) (*ListReposResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.reposImpl.List(ctx, req) - } - getItems := func(resp *ListReposResponse) []RepoInfo { - return resp.Repos - } - getNextReq := func(resp *ListReposResponse) *ListReposRequest { - if resp.NextPageToken == "" { - return nil - } - request.NextPageToken = resp.NextPageToken - return &request - } - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - getNextReq) - return iterator -} - -// Get repos. -// -// Returns repos that the calling user has Manage permissions on. Use -// `next_page_token` to iterate through additional pages. -// -// This method is generated by Databricks SDK Code Generator. -func (a *ReposAPI) ListAll(ctx context.Context, request ListReposRequest) ([]RepoInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[RepoInfo](ctx, iterator) -} - // RepoInfoPathToIdMap calls [ReposAPI.ListAll] and creates a map of results with [RepoInfo].Path as key and [RepoInfo].Id as value. 
// // Returns an error if there's more than one [RepoInfo] with the same .Path. @@ -722,49 +645,6 @@ func (a *SecretsAPI) DeleteScopeByScope(ctx context.Context, scope string) error }) } -// Lists ACLs. -// -// List the ACLs for a given secret scope. Users must have the `MANAGE` -// permission to invoke this API. -// -// Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws -// `PERMISSION_DENIED` if the user does not have permission to make this API -// call. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SecretsAPI) ListAcls(ctx context.Context, request ListAclsRequest) listing.Iterator[AclItem] { - - getNextPage := func(ctx context.Context, req ListAclsRequest) (*ListAclsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.secretsImpl.ListAcls(ctx, req) - } - getItems := func(resp *ListAclsResponse) []AclItem { - return resp.Items - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// Lists ACLs. -// -// List the ACLs for a given secret scope. Users must have the `MANAGE` -// permission to invoke this API. -// -// Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws -// `PERMISSION_DENIED` if the user does not have permission to make this API -// call. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SecretsAPI) ListAclsAll(ctx context.Context, request ListAclsRequest) ([]AclItem, error) { - iterator := a.ListAcls(ctx, request) - return listing.ToSlice[AclItem](ctx, iterator) -} - // Lists ACLs. // // List the ACLs for a given secret scope. Users must have the `MANAGE` @@ -774,98 +654,11 @@ func (a *SecretsAPI) ListAclsAll(ctx context.Context, request ListAclsRequest) ( // `PERMISSION_DENIED` if the user does not have permission to make this API // call. 
func (a *SecretsAPI) ListAclsByScope(ctx context.Context, scope string) (*ListAclsResponse, error) { - return a.secretsImpl.ListAcls(ctx, ListAclsRequest{ + return a.secretsImpl.internalListAcls(ctx, ListAclsRequest{ Scope: scope, }) } -// List all scopes. -// -// Lists all secret scopes available in the workspace. -// -// Throws `PERMISSION_DENIED` if the user does not have permission to make this -// API call. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SecretsAPI) ListScopes(ctx context.Context) listing.Iterator[SecretScope] { - request := struct{}{} - - getNextPage := func(ctx context.Context, req struct{}) (*ListScopesResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.secretsImpl.ListScopes(ctx) - } - getItems := func(resp *ListScopesResponse) []SecretScope { - return resp.Scopes - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List all scopes. -// -// Lists all secret scopes available in the workspace. -// -// Throws `PERMISSION_DENIED` if the user does not have permission to make this -// API call. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SecretsAPI) ListScopesAll(ctx context.Context) ([]SecretScope, error) { - iterator := a.ListScopes(ctx) - return listing.ToSlice[SecretScope](ctx, iterator) -} - -// List secret keys. -// -// Lists the secret keys that are stored at this scope. This is a metadata-only -// operation; secret data cannot be retrieved using this API. Users need the -// READ permission to make this call. -// -// The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws -// `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws -// `PERMISSION_DENIED` if the user does not have permission to make this API -// call. -// -// This method is generated by Databricks SDK Code Generator. 
-func (a *SecretsAPI) ListSecrets(ctx context.Context, request ListSecretsRequest) listing.Iterator[SecretMetadata] { - - getNextPage := func(ctx context.Context, req ListSecretsRequest) (*ListSecretsResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.secretsImpl.ListSecrets(ctx, req) - } - getItems := func(resp *ListSecretsResponse) []SecretMetadata { - return resp.Secrets - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List secret keys. -// -// Lists the secret keys that are stored at this scope. This is a metadata-only -// operation; secret data cannot be retrieved using this API. Users need the -// READ permission to make this call. -// -// The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws -// `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws -// `PERMISSION_DENIED` if the user does not have permission to make this API -// call. -// -// This method is generated by Databricks SDK Code Generator. -func (a *SecretsAPI) ListSecretsAll(ctx context.Context, request ListSecretsRequest) ([]SecretMetadata, error) { - iterator := a.ListSecrets(ctx, request) - return listing.ToSlice[SecretMetadata](ctx, iterator) -} - // List secret keys. // // Lists the secret keys that are stored at this scope. This is a metadata-only @@ -877,7 +670,7 @@ func (a *SecretsAPI) ListSecretsAll(ctx context.Context, request ListSecretsRequ // `PERMISSION_DENIED` if the user does not have permission to make this API // call. func (a *SecretsAPI) ListSecretsByScope(ctx context.Context, scope string) (*ListSecretsResponse, error) { - return a.secretsImpl.ListSecrets(ctx, ListSecretsRequest{ + return a.secretsImpl.internalListSecrets(ctx, ListSecretsRequest{ Scope: scope, }) } @@ -1071,43 +864,6 @@ func (a *WorkspaceAPI) GetStatusByPath(ctx context.Context, path string) (*Objec }) } -// List contents. 
-// -// Lists the contents of a directory, or the object if it is not a directory. If -// the input path does not exist, this call returns an error -// `RESOURCE_DOES_NOT_EXIST`. -// -// This method is generated by Databricks SDK Code Generator. -func (a *WorkspaceAPI) List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo] { - - getNextPage := func(ctx context.Context, req ListWorkspaceRequest) (*ListResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "pagination") - return a.workspaceImpl.List(ctx, req) - } - getItems := func(resp *ListResponse) []ObjectInfo { - return resp.Objects - } - - iterator := listing.NewIterator( - &request, - getNextPage, - getItems, - nil) - return iterator -} - -// List contents. -// -// Lists the contents of a directory, or the object if it is not a directory. If -// the input path does not exist, this call returns an error -// `RESOURCE_DOES_NOT_EXIST`. -// -// This method is generated by Databricks SDK Code Generator. -func (a *WorkspaceAPI) ListAll(ctx context.Context, request ListWorkspaceRequest) ([]ObjectInfo, error) { - iterator := a.List(ctx, request) - return listing.ToSlice[ObjectInfo](ctx, iterator) -} - // ObjectInfoPathToObjectIdMap calls [WorkspaceAPI.ListAll] and creates a map of results with [ObjectInfo].Path as key and [ObjectInfo].ObjectId as value. // // Returns an error if there's more than one [ObjectInfo] with the same .Path. 
diff --git a/service/workspace/impl.go b/service/workspace/impl.go index 63af14521..5ec0fd77e 100755 --- a/service/workspace/impl.go +++ b/service/workspace/impl.go @@ -8,6 +8,8 @@ import ( "net/http" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/useragent" ) // unexported type that holds implementations of just GitCredentials API methods @@ -46,7 +48,39 @@ func (a *gitCredentialsImpl) Get(ctx context.Context, request GetCredentialsRequ return &getCredentialsResponse, err } -func (a *gitCredentialsImpl) List(ctx context.Context) (*ListCredentialsResponse, error) { +// Get Git credentials. +// +// Lists the calling user's Git credentials. One credential per user is +// supported. +func (a *gitCredentialsImpl) List(ctx context.Context) listing.Iterator[CredentialInfo] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListCredentialsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx) + } + getItems := func(resp *ListCredentialsResponse) []CredentialInfo { + return resp.Credentials + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Get Git credentials. +// +// Lists the calling user's Git credentials. One credential per user is +// supported. 
+func (a *gitCredentialsImpl) ListAll(ctx context.Context) ([]CredentialInfo, error) { + iterator := a.List(ctx) + return listing.ToSlice[CredentialInfo](ctx, iterator) +} + +func (a *gitCredentialsImpl) internalList(ctx context.Context) (*ListCredentialsResponse, error) { var listCredentialsResponse ListCredentialsResponse path := "/api/2.0/git-credentials" @@ -123,7 +157,44 @@ func (a *reposImpl) GetPermissions(ctx context.Context, request GetRepoPermissio return &repoPermissions, err } -func (a *reposImpl) List(ctx context.Context, request ListReposRequest) (*ListReposResponse, error) { +// Get repos. +// +// Returns repos that the calling user has Manage permissions on. Use +// `next_page_token` to iterate through additional pages. +func (a *reposImpl) List(ctx context.Context, request ListReposRequest) listing.Iterator[RepoInfo] { + + getNextPage := func(ctx context.Context, req ListReposRequest) (*ListReposResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListReposResponse) []RepoInfo { + return resp.Repos + } + getNextReq := func(resp *ListReposResponse) *ListReposRequest { + if resp.NextPageToken == "" { + return nil + } + request.NextPageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// Get repos. +// +// Returns repos that the calling user has Manage permissions on. Use +// `next_page_token` to iterate through additional pages. 
+func (a *reposImpl) ListAll(ctx context.Context, request ListReposRequest) ([]RepoInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[RepoInfo](ctx, iterator) +} + +func (a *reposImpl) internalList(ctx context.Context, request ListReposRequest) (*ListReposResponse, error) { var listReposResponse ListReposResponse path := "/api/2.0/repos" queryParams := make(map[string]any) @@ -235,7 +306,46 @@ func (a *secretsImpl) GetSecret(ctx context.Context, request GetSecretRequest) ( return &getSecretResponse, err } -func (a *secretsImpl) ListAcls(ctx context.Context, request ListAclsRequest) (*ListAclsResponse, error) { +// Lists ACLs. +// +// List the ACLs for a given secret scope. Users must have the `MANAGE` +// permission to invoke this API. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *secretsImpl) ListAcls(ctx context.Context, request ListAclsRequest) listing.Iterator[AclItem] { + + getNextPage := func(ctx context.Context, req ListAclsRequest) (*ListAclsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListAcls(ctx, req) + } + getItems := func(resp *ListAclsResponse) []AclItem { + return resp.Items + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// Lists ACLs. +// +// List the ACLs for a given secret scope. Users must have the `MANAGE` +// permission to invoke this API. +// +// Throws `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. 
+func (a *secretsImpl) ListAclsAll(ctx context.Context, request ListAclsRequest) ([]AclItem, error) { + iterator := a.ListAcls(ctx, request) + return listing.ToSlice[AclItem](ctx, iterator) +} + +func (a *secretsImpl) internalListAcls(ctx context.Context, request ListAclsRequest) (*ListAclsResponse, error) { var listAclsResponse ListAclsResponse path := "/api/2.0/secrets/acls/list" queryParams := make(map[string]any) @@ -245,7 +355,43 @@ func (a *secretsImpl) ListAcls(ctx context.Context, request ListAclsRequest) (*L return &listAclsResponse, err } -func (a *secretsImpl) ListScopes(ctx context.Context) (*ListScopesResponse, error) { +// List all scopes. +// +// Lists all secret scopes available in the workspace. +// +// Throws `PERMISSION_DENIED` if the user does not have permission to make this +// API call. +func (a *secretsImpl) ListScopes(ctx context.Context) listing.Iterator[SecretScope] { + request := struct{}{} + + getNextPage := func(ctx context.Context, req struct{}) (*ListScopesResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListScopes(ctx) + } + getItems := func(resp *ListScopesResponse) []SecretScope { + return resp.Scopes + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List all scopes. +// +// Lists all secret scopes available in the workspace. +// +// Throws `PERMISSION_DENIED` if the user does not have permission to make this +// API call. 
+func (a *secretsImpl) ListScopesAll(ctx context.Context) ([]SecretScope, error) { + iterator := a.ListScopes(ctx) + return listing.ToSlice[SecretScope](ctx, iterator) +} + +func (a *secretsImpl) internalListScopes(ctx context.Context) (*ListScopesResponse, error) { var listScopesResponse ListScopesResponse path := "/api/2.0/secrets/scopes/list" @@ -255,7 +401,50 @@ func (a *secretsImpl) ListScopes(ctx context.Context) (*ListScopesResponse, erro return &listScopesResponse, err } -func (a *secretsImpl) ListSecrets(ctx context.Context, request ListSecretsRequest) (*ListSecretsResponse, error) { +// List secret keys. +// +// Lists the secret keys that are stored at this scope. This is a metadata-only +// operation; secret data cannot be retrieved using this API. Users need the +// READ permission to make this call. +// +// The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws +// `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *secretsImpl) ListSecrets(ctx context.Context, request ListSecretsRequest) listing.Iterator[SecretMetadata] { + + getNextPage := func(ctx context.Context, req ListSecretsRequest) (*ListSecretsResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListSecrets(ctx, req) + } + getItems := func(resp *ListSecretsResponse) []SecretMetadata { + return resp.Secrets + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List secret keys. +// +// Lists the secret keys that are stored at this scope. This is a metadata-only +// operation; secret data cannot be retrieved using this API. Users need the +// READ permission to make this call. +// +// The lastUpdatedTimestamp returned is in milliseconds since epoch. Throws +// `RESOURCE_DOES_NOT_EXIST` if no such secret scope exists. 
Throws +// `PERMISSION_DENIED` if the user does not have permission to make this API +// call. +func (a *secretsImpl) ListSecretsAll(ctx context.Context, request ListSecretsRequest) ([]SecretMetadata, error) { + iterator := a.ListSecrets(ctx, request) + return listing.ToSlice[SecretMetadata](ctx, iterator) +} + +func (a *secretsImpl) internalListSecrets(ctx context.Context, request ListSecretsRequest) (*ListSecretsResponse, error) { var listSecretsResponse ListSecretsResponse path := "/api/2.0/secrets/list" queryParams := make(map[string]any) @@ -354,7 +543,40 @@ func (a *workspaceImpl) Import(ctx context.Context, request Import) error { return err } -func (a *workspaceImpl) List(ctx context.Context, request ListWorkspaceRequest) (*ListResponse, error) { +// List contents. +// +// Lists the contents of a directory, or the object if it is not a directory. If +// the input path does not exist, this call returns an error +// `RESOURCE_DOES_NOT_EXIST`. +func (a *workspaceImpl) List(ctx context.Context, request ListWorkspaceRequest) listing.Iterator[ObjectInfo] { + + getNextPage := func(ctx context.Context, req ListWorkspaceRequest) (*ListResponse, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalList(ctx, req) + } + getItems := func(resp *ListResponse) []ObjectInfo { + return resp.Objects + } + + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + nil) + return iterator +} + +// List contents. +// +// Lists the contents of a directory, or the object if it is not a directory. If +// the input path does not exist, this call returns an error +// `RESOURCE_DOES_NOT_EXIST`. 
+func (a *workspaceImpl) ListAll(ctx context.Context, request ListWorkspaceRequest) ([]ObjectInfo, error) { + iterator := a.List(ctx, request) + return listing.ToSlice[ObjectInfo](ctx, iterator) +} + +func (a *workspaceImpl) internalList(ctx context.Context, request ListWorkspaceRequest) (*ListResponse, error) { var listResponse ListResponse path := "/api/2.0/workspace/list" queryParams := make(map[string]any) diff --git a/version/version.go b/version/version.go index 47e4ca507..4a840112f 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.57.0" +const Version = "0.58.0" From 967d0632b7676ca14b3dae154dcc2f727f4350c6 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Wed, 12 Feb 2025 11:39:58 +0100 Subject: [PATCH 15/54] [Release] Release v0.58.1 (#1146) ### Internal Changes * Do not send ForceSendFields as query parameters. --- CHANGELOG.md | 6 + service/apps/model.go | 34 ++--- service/billing/model.go | 50 +++--- service/catalog/model.go | 278 +++++++++++++++++----------------- service/cleanrooms/model.go | 38 ++--- service/compute/model.go | 196 ++++++++++++------------ service/dashboards/model.go | 48 +++--- service/files/model.go | 26 ++-- service/iam/model.go | 74 ++++----- service/jobs/model.go | 162 ++++++++++---------- service/marketplace/model.go | 118 +++++++-------- service/ml/model.go | 164 ++++++++++---------- service/oauth2/model.go | 54 +++---- service/pipelines/model.go | 84 +++++----- service/pkg.go | 2 +- service/provisioning/model.go | 50 +++--- service/serving/model.go | 82 +++++----- service/settings/model.go | 162 ++++++++++---------- service/sharing/model.go | 56 +++---- service/sql/model.go | 188 +++++++++++------------ service/vectorsearch/model.go | 52 +++---- service/workspace/model.go | 64 ++++---- version/version.go | 2 +- 23 files changed, 998 insertions(+), 992 deletions(-) diff --git a/CHANGELOG.md b/CHANGELOG.md index 
a2f2aaf25..dbad64813 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Version changelog +## [Release] Release v0.58.1 + +### Internal Changes + +* Do not send ForceSendFields as query parameters. + ## [Release] Release v0.58.0 ### New Features and Improvements diff --git a/service/apps/model.go b/service/apps/model.go index 26d7b2b0c..4ff7ace55 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -49,7 +49,7 @@ type App struct { // The URL of the app once it is deployed. Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *App) UnmarshalJSON(b []byte) error { @@ -70,7 +70,7 @@ type AppAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -93,7 +93,7 @@ type AppAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -128,7 +128,7 @@ type AppDeployment struct { // The update time of the deployment. Formatted timestamp in ISO 6801. UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppDeployment) UnmarshalJSON(b []byte) error { @@ -144,7 +144,7 @@ type AppDeploymentArtifacts struct { // the deployed app. SourceCodePath string `json:"source_code_path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppDeploymentArtifacts) UnmarshalJSON(b []byte) error { @@ -219,7 +219,7 @@ type AppDeploymentStatus struct { // State of the deployment. 
State AppDeploymentState `json:"state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppDeploymentStatus) UnmarshalJSON(b []byte) error { @@ -237,7 +237,7 @@ type AppPermission struct { // Permission level PermissionLevel AppPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppPermission) UnmarshalJSON(b []byte) error { @@ -283,7 +283,7 @@ type AppPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppPermissions) UnmarshalJSON(b []byte) error { @@ -299,7 +299,7 @@ type AppPermissionsDescription struct { // Permission level PermissionLevel AppPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -330,7 +330,7 @@ type AppResource struct { SqlWarehouse *AppResourceSqlWarehouse `json:"sql_warehouse,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AppResource) UnmarshalJSON(b []byte) error { @@ -532,7 +532,7 @@ type ApplicationStatus struct { // State of the application. State ApplicationState `json:"state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ApplicationStatus) UnmarshalJSON(b []byte) error { @@ -586,7 +586,7 @@ type ComputeStatus struct { // State of the app compute. State ComputeState `json:"state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ComputeStatus) UnmarshalJSON(b []byte) error { @@ -610,7 +610,7 @@ type CreateAppRequest struct { // If true, the app will not be started after creation. 
NoCompute bool `json:"-" url:"no_compute,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateAppRequest) UnmarshalJSON(b []byte) error { @@ -668,7 +668,7 @@ type ListAppDeploymentsRequest struct { // absent. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAppDeploymentsRequest) UnmarshalJSON(b []byte) error { @@ -685,7 +685,7 @@ type ListAppDeploymentsResponse struct { // Pagination token to request the next page of apps. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAppDeploymentsResponse) UnmarshalJSON(b []byte) error { @@ -704,7 +704,7 @@ type ListAppsRequest struct { // absent. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAppsRequest) UnmarshalJSON(b []byte) error { @@ -720,7 +720,7 @@ type ListAppsResponse struct { // Pagination token to request the next page of apps. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAppsResponse) UnmarshalJSON(b []byte) error { diff --git a/service/billing/model.go b/service/billing/model.go index 641a70fad..d95320f06 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -18,7 +18,7 @@ type ActionConfiguration struct { // Target for the action. For example, an email address. Target string `json:"target,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ActionConfiguration) UnmarshalJSON(b []byte) error { @@ -72,7 +72,7 @@ type AlertConfiguration struct { // triggered state. 
TriggerType AlertConfigurationTriggerType `json:"trigger_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AlertConfiguration) UnmarshalJSON(b []byte) error { @@ -178,7 +178,7 @@ type BudgetConfiguration struct { // Update time of this budget configuration. UpdateTime int64 `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *BudgetConfiguration) UnmarshalJSON(b []byte) error { @@ -234,7 +234,7 @@ type BudgetConfigurationFilterTagClause struct { Value *BudgetConfigurationFilterClause `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *BudgetConfigurationFilterTagClause) UnmarshalJSON(b []byte) error { @@ -263,7 +263,7 @@ type BudgetPolicy struct { // contain only characters from the ISO 8859-1 (latin1) set. PolicyName string `json:"policy_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *BudgetPolicy) UnmarshalJSON(b []byte) error { @@ -283,7 +283,7 @@ type CreateBillingUsageDashboardRequest struct { // created. WorkspaceId int64 `json:"workspace_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateBillingUsageDashboardRequest) UnmarshalJSON(b []byte) error { @@ -298,7 +298,7 @@ type CreateBillingUsageDashboardResponse struct { // The unique id of the usage dashboard. DashboardId string `json:"dashboard_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateBillingUsageDashboardResponse) UnmarshalJSON(b []byte) error { @@ -323,7 +323,7 @@ type CreateBudgetConfigurationBudget struct { // matched for usage to be included. 
Filter *BudgetConfigurationFilter `json:"filter,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateBudgetConfigurationBudget) UnmarshalJSON(b []byte) error { @@ -340,7 +340,7 @@ type CreateBudgetConfigurationBudgetActionConfigurations struct { // Target for the action. For example, an email address. Target string `json:"target,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateBudgetConfigurationBudgetActionConfigurations) UnmarshalJSON(b []byte) error { @@ -367,7 +367,7 @@ type CreateBudgetConfigurationBudgetAlertConfigurations struct { // triggered state. TriggerType AlertConfigurationTriggerType `json:"trigger_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateBudgetConfigurationBudgetAlertConfigurations) UnmarshalJSON(b []byte) error { @@ -402,7 +402,7 @@ type CreateBudgetPolicyRequest struct { // `request_id` is provided. RequestId string `json:"request_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateBudgetPolicyRequest) UnmarshalJSON(b []byte) error { @@ -484,7 +484,7 @@ type CreateLogDeliveryConfigurationParams struct { // unnecessary. WorkspaceIdsFilter []int64 `json:"workspace_ids_filter,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateLogDeliveryConfigurationParams) UnmarshalJSON(b []byte) error { @@ -576,7 +576,7 @@ type DownloadRequest struct { // field is required. StartMonth string `json:"-" url:"start_month"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DownloadRequest) UnmarshalJSON(b []byte) error { @@ -604,7 +604,7 @@ type Filter struct { // policies will be returned. 
PolicyName string `json:"policy_name,omitempty" url:"policy_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Filter) UnmarshalJSON(b []byte) error { @@ -625,7 +625,7 @@ type GetBillingUsageDashboardRequest struct { // created. WorkspaceId int64 `json:"-" url:"workspace_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetBillingUsageDashboardRequest) UnmarshalJSON(b []byte) error { @@ -642,7 +642,7 @@ type GetBillingUsageDashboardResponse struct { // The URL of the usage dashboard. DashboardUrl string `json:"dashboard_url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetBillingUsageDashboardResponse) UnmarshalJSON(b []byte) error { @@ -687,7 +687,7 @@ type ListBudgetConfigurationsRequest struct { // page if absent. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListBudgetConfigurationsRequest) UnmarshalJSON(b []byte) error { @@ -704,7 +704,7 @@ type ListBudgetConfigurationsResponse struct { // results. If this field is omitted, there are no subsequent budgets. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListBudgetConfigurationsResponse) UnmarshalJSON(b []byte) error { @@ -734,7 +734,7 @@ type ListBudgetPoliciesRequest struct { // The sort specification. SortSpec *SortSpec `json:"-" url:"sort_spec,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListBudgetPoliciesRequest) UnmarshalJSON(b []byte) error { @@ -756,7 +756,7 @@ type ListBudgetPoliciesResponse struct { // In this field is omitted, there are no previous pages. 
PreviousPageToken string `json:"previous_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListBudgetPoliciesResponse) UnmarshalJSON(b []byte) error { @@ -776,7 +776,7 @@ type ListLogDeliveryRequest struct { // Filter by storage configuration ID. StorageConfigurationId string `json:"-" url:"storage_configuration_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListLogDeliveryRequest) UnmarshalJSON(b []byte) error { @@ -902,7 +902,7 @@ type LogDeliveryConfiguration struct { // unnecessary. WorkspaceIdsFilter []int64 `json:"workspace_ids_filter,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogDeliveryConfiguration) UnmarshalJSON(b []byte) error { @@ -935,7 +935,7 @@ type LogDeliveryStatus struct { // the account. Status DeliveryStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogDeliveryStatus) UnmarshalJSON(b []byte) error { @@ -1031,7 +1031,7 @@ type SortSpec struct { // The filed to sort by Field SortSpecField `json:"field,omitempty" url:"field,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SortSpec) UnmarshalJSON(b []byte) error { @@ -1083,7 +1083,7 @@ type UpdateBudgetConfigurationBudget struct { // matched for usage to be included. Filter *BudgetConfigurationFilter `json:"filter,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateBudgetConfigurationBudget) UnmarshalJSON(b []byte) error { diff --git a/service/catalog/model.go b/service/catalog/model.go index 010abbdc6..6673ab8cf 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -71,7 +71,7 @@ type ArtifactAllowlistInfo struct { // Unique identifier of parent metastore. 
MetastoreId string `json:"metastore_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ArtifactAllowlistInfo) UnmarshalJSON(b []byte) error { @@ -136,7 +136,7 @@ type AwsCredentials struct { // credentials. SessionToken string `json:"session_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AwsCredentials) UnmarshalJSON(b []byte) error { @@ -159,7 +159,7 @@ type AwsIamRole struct { // This is the identity that is going to assume the AWS IAM role. UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AwsIamRole) UnmarshalJSON(b []byte) error { @@ -185,7 +185,7 @@ type AwsIamRoleResponse struct { // This is the identity that is going to assume the AWS IAM role. UnityCatalogIamArn string `json:"unity_catalog_iam_arn,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AwsIamRoleResponse) UnmarshalJSON(b []byte) error { @@ -204,7 +204,7 @@ type AzureActiveDirectoryToken struct { // Directory to access cloud services. AadToken string `json:"aad_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AzureActiveDirectoryToken) UnmarshalJSON(b []byte) error { @@ -234,7 +234,7 @@ type AzureManagedIdentity struct { // using the system-assigned identity. ManagedIdentityId string `json:"managed_identity_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AzureManagedIdentity) UnmarshalJSON(b []byte) error { @@ -258,7 +258,7 @@ type AzureManagedIdentityRequest struct { // for a system-assigned identity. 
ManagedIdentityId string `json:"managed_identity_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AzureManagedIdentityRequest) UnmarshalJSON(b []byte) error { @@ -284,7 +284,7 @@ type AzureManagedIdentityResponse struct { // for a system-assigned identity. ManagedIdentityId string `json:"managed_identity_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AzureManagedIdentityResponse) UnmarshalJSON(b []byte) error { @@ -314,7 +314,7 @@ type AzureUserDelegationSas struct { // The signed URI (SAS Token) used to access blob services for a given path SasToken string `json:"sas_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AzureUserDelegationSas) UnmarshalJSON(b []byte) error { @@ -391,7 +391,7 @@ type CatalogInfo struct { // Username of user who last modified catalog. UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CatalogInfo) UnmarshalJSON(b []byte) error { @@ -496,7 +496,7 @@ type ColumnInfo struct { // Full data type specification as SQL/catalogString text. TypeText string `json:"type_text,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ColumnInfo) UnmarshalJSON(b []byte) error { @@ -516,7 +516,7 @@ type ColumnMask struct { // match the types of columns in 'using_column_names'. UsingColumnNames []string `json:"using_column_names,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ColumnMask) UnmarshalJSON(b []byte) error { @@ -633,7 +633,7 @@ type ConnectionInfo struct { // URL of the remote data source, extracted from options. 
Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ConnectionInfo) UnmarshalJSON(b []byte) error { @@ -703,7 +703,7 @@ type ContinuousUpdateStatus struct { // table to the online table. Timestamp string `json:"timestamp,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ContinuousUpdateStatus) UnmarshalJSON(b []byte) error { @@ -735,7 +735,7 @@ type CreateCatalog struct { // Storage root URL for managed tables within catalog. StorageRoot string `json:"storage_root,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCatalog) UnmarshalJSON(b []byte) error { @@ -761,7 +761,7 @@ type CreateConnection struct { // If the connection is read only. ReadOnly bool `json:"read_only,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateConnection) UnmarshalJSON(b []byte) error { @@ -797,7 +797,7 @@ type CreateCredentialRequest struct { // set of credentials. SkipValidation bool `json:"skip_validation,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCredentialRequest) UnmarshalJSON(b []byte) error { @@ -831,7 +831,7 @@ type CreateExternalLocation struct { // Path URL of the external location. Url string `json:"url"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateExternalLocation) UnmarshalJSON(b []byte) error { @@ -890,7 +890,7 @@ type CreateFunction struct { // List of schemes whose objects can be referenced without qualification. 
SqlPath string `json:"sql_path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateFunction) UnmarshalJSON(b []byte) error { @@ -1030,7 +1030,7 @@ type CreateMetastore struct { // The storage root URL for metastore StorageRoot string `json:"storage_root,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateMetastore) UnmarshalJSON(b []byte) error { @@ -1092,7 +1092,7 @@ type CreateMonitor struct { // specified, the first running warehouse will be used. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateMonitor) UnmarshalJSON(b []byte) error { @@ -1122,7 +1122,7 @@ type CreateRegisteredModelRequest struct { // are stored StorageLocation string `json:"storage_location,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateRegisteredModelRequest) UnmarshalJSON(b []byte) error { @@ -1148,7 +1148,7 @@ type CreateSchema struct { // Storage root URL for managed tables within schema. StorageRoot string `json:"storage_root,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateSchema) UnmarshalJSON(b []byte) error { @@ -1180,7 +1180,7 @@ type CreateStorageCredential struct { // credential. SkipValidation bool `json:"skip_validation,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateStorageCredential) UnmarshalJSON(b []byte) error { @@ -1214,7 +1214,7 @@ type CreateVolumeRequestContent struct { VolumeType VolumeType `json:"volume_type"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateVolumeRequestContent) UnmarshalJSON(b []byte) error { @@ -1269,7 +1269,7 @@ type CredentialInfo struct { // credential. 
Only applicable when purpose is **STORAGE**. UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CredentialInfo) UnmarshalJSON(b []byte) error { @@ -1341,7 +1341,7 @@ type CredentialValidationResult struct { // The results of the tested operation. Result ValidateCredentialResult `json:"result,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CredentialValidationResult) UnmarshalJSON(b []byte) error { @@ -1441,7 +1441,7 @@ type DatabricksGcpServiceAccount struct { // The ID that represents the private key for this Service Account PrivateKeyId string `json:"private_key_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DatabricksGcpServiceAccount) UnmarshalJSON(b []byte) error { @@ -1462,7 +1462,7 @@ type DatabricksGcpServiceAccountResponse struct { // The email of the service account. This is an output-only field. Email string `json:"email,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DatabricksGcpServiceAccountResponse) UnmarshalJSON(b []byte) error { @@ -1488,7 +1488,7 @@ type DeleteAccountMetastoreRequest struct { // Unity Catalog metastore ID MetastoreId string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteAccountMetastoreRequest) UnmarshalJSON(b []byte) error { @@ -1509,7 +1509,7 @@ type DeleteAccountStorageCredentialRequest struct { // Name of the storage credential. StorageCredentialName string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteAccountStorageCredentialRequest) UnmarshalJSON(b []byte) error { @@ -1538,7 +1538,7 @@ type DeleteCatalogRequest struct { // The name of the catalog. 
Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteCatalogRequest) UnmarshalJSON(b []byte) error { @@ -1564,7 +1564,7 @@ type DeleteCredentialRequest struct { // Name of the credential. NameArg string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteCredentialRequest) UnmarshalJSON(b []byte) error { @@ -1585,7 +1585,7 @@ type DeleteExternalLocationRequest struct { // Name of the external location. Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteExternalLocationRequest) UnmarshalJSON(b []byte) error { @@ -1604,7 +1604,7 @@ type DeleteFunctionRequest struct { // __catalog_name__.__schema_name__.__function__name__). Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteFunctionRequest) UnmarshalJSON(b []byte) error { @@ -1622,7 +1622,7 @@ type DeleteMetastoreRequest struct { // Unique ID of the metastore. Id string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteMetastoreRequest) UnmarshalJSON(b []byte) error { @@ -1669,7 +1669,7 @@ type DeleteSchemaRequest struct { // Full name of the schema. FullName string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteSchemaRequest) UnmarshalJSON(b []byte) error { @@ -1688,7 +1688,7 @@ type DeleteStorageCredentialRequest struct { // Name of the storage credential. Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteStorageCredentialRequest) UnmarshalJSON(b []byte) error { @@ -1774,7 +1774,7 @@ type EffectivePredictiveOptimizationFlag struct { // objects under it. 
Value EnablePredictiveOptimization `json:"value"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EffectivePredictiveOptimizationFlag) UnmarshalJSON(b []byte) error { @@ -1826,7 +1826,7 @@ type EffectivePrivilege struct { // The privilege assigned to the principal. Privilege Privilege `json:"privilege,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EffectivePrivilege) UnmarshalJSON(b []byte) error { @@ -1844,7 +1844,7 @@ type EffectivePrivilegeAssignment struct { // inheritance). Privileges []EffectivePrivilege `json:"privileges,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EffectivePrivilegeAssignment) UnmarshalJSON(b []byte) error { @@ -1950,7 +1950,7 @@ type ExternalLocationInfo struct { // Path URL of the external location. Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExternalLocationInfo) UnmarshalJSON(b []byte) error { @@ -1974,7 +1974,7 @@ type FailedStatus struct { // and available for serving. Timestamp string `json:"timestamp,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FailedStatus) UnmarshalJSON(b []byte) error { @@ -2072,7 +2072,7 @@ type FunctionInfo struct { // Username of user who last modified function. UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FunctionInfo) UnmarshalJSON(b []byte) error { @@ -2222,7 +2222,7 @@ type FunctionParameterInfo struct { // Full data type spec, SQL/catalogString text. 
TypeText string `json:"type_text"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FunctionParameterInfo) UnmarshalJSON(b []byte) error { @@ -2298,7 +2298,7 @@ func (f *FunctionParameterType) Type() string { type GcpOauthToken struct { OauthToken string `json:"oauth_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GcpOauthToken) UnmarshalJSON(b []byte) error { @@ -2343,7 +2343,7 @@ type GenerateTemporaryTableCredentialRequest struct { // UUID of the table to read or write. TableId string `json:"table_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GenerateTemporaryTableCredentialRequest) UnmarshalJSON(b []byte) error { @@ -2377,7 +2377,7 @@ type GenerateTemporaryTableCredentialResponse struct { // The URL of the storage path accessible by the temporary credential. Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GenerateTemporaryTableCredentialResponse) UnmarshalJSON(b []byte) error { @@ -2430,7 +2430,7 @@ type GetBindingsRequest struct { // The type of the securable to bind to a workspace. SecurableType GetBindingsSecurableType `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetBindingsRequest) UnmarshalJSON(b []byte) error { @@ -2482,7 +2482,7 @@ type GetByAliasRequest struct { // response IncludeAliases bool `json:"-" url:"include_aliases,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetByAliasRequest) UnmarshalJSON(b []byte) error { @@ -2501,7 +2501,7 @@ type GetCatalogRequest struct { // The name of the catalog. 
Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetCatalogRequest) UnmarshalJSON(b []byte) error { @@ -2534,7 +2534,7 @@ type GetEffectiveRequest struct { // Type of securable. SecurableType SecurableType `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetEffectiveRequest) UnmarshalJSON(b []byte) error { @@ -2553,7 +2553,7 @@ type GetExternalLocationRequest struct { // Name of the external location. Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetExternalLocationRequest) UnmarshalJSON(b []byte) error { @@ -2573,7 +2573,7 @@ type GetFunctionRequest struct { // __catalog_name__.__schema_name__.__function__name__). Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetFunctionRequest) UnmarshalJSON(b []byte) error { @@ -2594,7 +2594,7 @@ type GetGrantRequest struct { // Type of securable. SecurableType SecurableType `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetGrantRequest) UnmarshalJSON(b []byte) error { @@ -2655,7 +2655,7 @@ type GetMetastoreSummaryResponse struct { // Username of user who last modified the metastore. 
UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetMetastoreSummaryResponse) UnmarshalJSON(b []byte) error { @@ -2707,7 +2707,7 @@ type GetModelVersionRequest struct { // The integer version number of the model version Version int `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetModelVersionRequest) UnmarshalJSON(b []byte) error { @@ -2765,7 +2765,7 @@ type GetRegisteredModelRequest struct { // principal can only access selective metadata for IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetRegisteredModelRequest) UnmarshalJSON(b []byte) error { @@ -2784,7 +2784,7 @@ type GetSchemaRequest struct { // only access selective metadata for IncludeBrowse bool `json:"-" url:"include_browse,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetSchemaRequest) UnmarshalJSON(b []byte) error { @@ -2813,7 +2813,7 @@ type GetTableRequest struct { // Whether to include a manifest containing capabilities the table has. IncludeManifestCapabilities bool `json:"-" url:"include_manifest_capabilities,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetTableRequest) UnmarshalJSON(b []byte) error { @@ -2897,7 +2897,7 @@ type ListCatalogsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCatalogsRequest) UnmarshalJSON(b []byte) error { @@ -2916,7 +2916,7 @@ type ListCatalogsResponse struct { // request (for the next page of results). 
NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCatalogsResponse) UnmarshalJSON(b []byte) error { @@ -2939,7 +2939,7 @@ type ListConnectionsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListConnectionsRequest) UnmarshalJSON(b []byte) error { @@ -2958,7 +2958,7 @@ type ListConnectionsResponse struct { // request (for the next page of results). NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListConnectionsResponse) UnmarshalJSON(b []byte) error { @@ -2982,7 +2982,7 @@ type ListCredentialsRequest struct { // Return only credentials for the specified purpose. Purpose CredentialPurpose `json:"-" url:"purpose,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCredentialsRequest) UnmarshalJSON(b []byte) error { @@ -3000,7 +3000,7 @@ type ListCredentialsResponse struct { // request (for the next page of results). NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCredentialsResponse) UnmarshalJSON(b []byte) error { @@ -3026,7 +3026,7 @@ type ListExternalLocationsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExternalLocationsRequest) UnmarshalJSON(b []byte) error { @@ -3045,7 +3045,7 @@ type ListExternalLocationsResponse struct { // request (for the next page of results). 
NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExternalLocationsResponse) UnmarshalJSON(b []byte) error { @@ -3075,7 +3075,7 @@ type ListFunctionsRequest struct { // Parent schema of functions. SchemaName string `json:"-" url:"schema_name"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListFunctionsRequest) UnmarshalJSON(b []byte) error { @@ -3094,7 +3094,7 @@ type ListFunctionsResponse struct { // request (for the next page of results). NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListFunctionsResponse) UnmarshalJSON(b []byte) error { @@ -3129,7 +3129,7 @@ type ListModelVersionsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListModelVersionsRequest) UnmarshalJSON(b []byte) error { @@ -3147,7 +3147,7 @@ type ListModelVersionsResponse struct { // request (for the next page of results). NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListModelVersionsResponse) UnmarshalJSON(b []byte) error { @@ -3165,7 +3165,7 @@ type ListQuotasRequest struct { // Opaque token for the next page of results. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQuotasRequest) UnmarshalJSON(b []byte) error { @@ -3184,7 +3184,7 @@ type ListQuotasResponse struct { // An array of returned QuotaInfos. 
Quotas []QuotaInfo `json:"quotas,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQuotasResponse) UnmarshalJSON(b []byte) error { @@ -3233,7 +3233,7 @@ type ListRegisteredModelsRequest struct { // specified, catalog_name must be specified. SchemaName string `json:"-" url:"schema_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListRegisteredModelsRequest) UnmarshalJSON(b []byte) error { @@ -3251,7 +3251,7 @@ type ListRegisteredModelsResponse struct { RegisteredModels []RegisteredModelInfo `json:"registered_models,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListRegisteredModelsResponse) UnmarshalJSON(b []byte) error { @@ -3279,7 +3279,7 @@ type ListSchemasRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSchemasRequest) UnmarshalJSON(b []byte) error { @@ -3298,7 +3298,7 @@ type ListSchemasResponse struct { // An array of schema information objects. Schemas []SchemaInfo `json:"schemas,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSchemasResponse) UnmarshalJSON(b []byte) error { @@ -3321,7 +3321,7 @@ type ListStorageCredentialsRequest struct { // Opaque pagination token to go to next page based on previous query. 
PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListStorageCredentialsRequest) UnmarshalJSON(b []byte) error { @@ -3340,7 +3340,7 @@ type ListStorageCredentialsResponse struct { StorageCredentials []StorageCredentialInfo `json:"storage_credentials,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListStorageCredentialsResponse) UnmarshalJSON(b []byte) error { @@ -3374,7 +3374,7 @@ type ListSummariesRequest struct { // if not set or empty. TableNamePattern string `json:"-" url:"table_name_pattern,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSummariesRequest) UnmarshalJSON(b []byte) error { @@ -3399,7 +3399,7 @@ type ListSystemSchemasRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSystemSchemasRequest) UnmarshalJSON(b []byte) error { @@ -3418,7 +3418,7 @@ type ListSystemSchemasResponse struct { // An array of system schema information objects. Schemas []SystemSchemaInfo `json:"schemas,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSystemSchemasResponse) UnmarshalJSON(b []byte) error { @@ -3437,7 +3437,7 @@ type ListTableSummariesResponse struct { // List of table summaries. Tables []TableSummary `json:"tables,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListTableSummariesResponse) UnmarshalJSON(b []byte) error { @@ -3478,7 +3478,7 @@ type ListTablesRequest struct { // Parent schema of tables. 
SchemaName string `json:"-" url:"schema_name"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListTablesRequest) UnmarshalJSON(b []byte) error { @@ -3497,7 +3497,7 @@ type ListTablesResponse struct { // An array of table information objects. Tables []TableInfo `json:"tables,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListTablesResponse) UnmarshalJSON(b []byte) error { @@ -3534,7 +3534,7 @@ type ListVolumesRequest struct { // The identifier of the schema SchemaName string `json:"-" url:"schema_name"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListVolumesRequest) UnmarshalJSON(b []byte) error { @@ -3553,7 +3553,7 @@ type ListVolumesResponseContent struct { Volumes []VolumeInfo `json:"volumes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListVolumesResponseContent) UnmarshalJSON(b []byte) error { @@ -3598,7 +3598,7 @@ type MetastoreAssignment struct { // The unique ID of the Databricks workspace. WorkspaceId int64 `json:"workspace_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MetastoreAssignment) UnmarshalJSON(b []byte) error { @@ -3653,7 +3653,7 @@ type MetastoreInfo struct { // Username of user who last modified the metastore. UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MetastoreInfo) UnmarshalJSON(b []byte) error { @@ -3744,7 +3744,7 @@ type ModelVersionInfo struct { // requests. Version int `json:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ModelVersionInfo) UnmarshalJSON(b []byte) error { @@ -3832,7 +3832,7 @@ type MonitorDataClassificationConfig struct { // Whether data classification is enabled. 
Enabled bool `json:"enabled,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MonitorDataClassificationConfig) UnmarshalJSON(b []byte) error { @@ -3879,7 +3879,7 @@ type MonitorInferenceLog struct { // [function]: https://spark.apache.org/docs/latest/api/python/reference/pyspark.sql/api/pyspark.sql.functions.to_timestamp.html TimestampCol string `json:"timestamp_col"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MonitorInferenceLog) UnmarshalJSON(b []byte) error { @@ -3970,7 +3970,7 @@ type MonitorInfo struct { // Configuration for monitoring time series tables. TimeSeries *MonitorTimeSeries `json:"time_series,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MonitorInfo) UnmarshalJSON(b []byte) error { @@ -4106,7 +4106,7 @@ type MonitorRefreshInfo struct { // The method by which the refresh was triggered. Trigger MonitorRefreshInfoTrigger `json:"trigger,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MonitorRefreshInfo) UnmarshalJSON(b []byte) error { @@ -4223,7 +4223,7 @@ type OnlineTable struct { // runs asynchronously). UnityCatalogProvisioningState ProvisioningInfoState `json:"unity_catalog_provisioning_state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *OnlineTable) UnmarshalJSON(b []byte) error { @@ -4260,7 +4260,7 @@ type OnlineTableSpec struct { // key. TimeseriesKey string `json:"timeseries_key,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *OnlineTableSpec) UnmarshalJSON(b []byte) error { @@ -4343,7 +4343,7 @@ type OnlineTableStatus struct { // ONLINE_TRIGGERED_UPDATE or the ONLINE_NO_PENDING_UPDATE state. 
TriggeredUpdateStatus *TriggeredUpdateStatus `json:"triggered_update_status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *OnlineTableStatus) UnmarshalJSON(b []byte) error { @@ -4362,7 +4362,7 @@ type PermissionsChange struct { // The set of privileges to remove. Remove []Privilege `json:"remove,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PermissionsChange) UnmarshalJSON(b []byte) error { @@ -4393,7 +4393,7 @@ type PipelineProgress struct { // number may be an estimate. TotalRowCount int64 `json:"total_row_count,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineProgress) UnmarshalJSON(b []byte) error { @@ -4530,7 +4530,7 @@ type PrivilegeAssignment struct { // The privileges assigned to the principal. Privileges []Privilege `json:"privileges,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PrivilegeAssignment) UnmarshalJSON(b []byte) error { @@ -4607,7 +4607,7 @@ type QuotaInfo struct { // The name of the quota. QuotaName string `json:"quota_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QuotaInfo) UnmarshalJSON(b []byte) error { @@ -4628,7 +4628,7 @@ type R2Credentials struct { // The generated JWT that users must pass to use the temporary credentials. 
SessionToken string `json:"session_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *R2Credentials) UnmarshalJSON(b []byte) error { @@ -4647,7 +4647,7 @@ type ReadVolumeRequest struct { // The three-level (fully qualified) name of the volume Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ReadVolumeRequest) UnmarshalJSON(b []byte) error { @@ -4665,7 +4665,7 @@ type RegenerateDashboardRequest struct { // not specified, the first running warehouse will be used. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegenerateDashboardRequest) UnmarshalJSON(b []byte) error { @@ -4682,7 +4682,7 @@ type RegenerateDashboardResponse struct { // The directory where the regenerated dashboard is stored. ParentFolder string `json:"parent_folder,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegenerateDashboardResponse) UnmarshalJSON(b []byte) error { @@ -4700,7 +4700,7 @@ type RegisteredModelAlias struct { // Integer version number of the model version to which this alias points. VersionNum int `json:"version_num,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegisteredModelAlias) UnmarshalJSON(b []byte) error { @@ -4746,7 +4746,7 @@ type RegisteredModelInfo struct { // The identifier of the user who updated the registered model last time UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegisteredModelInfo) UnmarshalJSON(b []byte) error { @@ -4804,7 +4804,7 @@ type SchemaInfo struct { // Username of user who last modified schema. 
UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SchemaInfo) UnmarshalJSON(b []byte) error { @@ -4899,7 +4899,7 @@ type SseEncryptionDetails struct { // key to use. AwsKmsKeyArn string `json:"aws_kms_key_arn,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SseEncryptionDetails) UnmarshalJSON(b []byte) error { @@ -4977,7 +4977,7 @@ type StorageCredentialInfo struct { // credential. UsedForManagedStorage bool `json:"used_for_managed_storage,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StorageCredentialInfo) UnmarshalJSON(b []byte) error { @@ -4995,7 +4995,7 @@ type SystemSchemaInfo struct { // means the system schema is available and ready for opt-in. State SystemSchemaInfoState `json:"state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SystemSchemaInfo) UnmarshalJSON(b []byte) error { @@ -5063,7 +5063,7 @@ type TableExistsResponse struct { // Whether the table exists or not. TableExists bool `json:"table_exists,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TableExistsResponse) UnmarshalJSON(b []byte) error { @@ -5153,7 +5153,7 @@ type TableInfo struct { // dependencies are provided and recorded. 
ViewDependencies *DependencyList `json:"view_dependencies,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TableInfo) UnmarshalJSON(b []byte) error { @@ -5206,7 +5206,7 @@ type TableSummary struct { TableType TableType `json:"table_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TableSummary) UnmarshalJSON(b []byte) error { @@ -5271,7 +5271,7 @@ type TemporaryCredentials struct { // https://developers.google.com/identity/protocols/oauth2/service-account GcpOauthToken *GcpOauthToken `json:"gcp_oauth_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TemporaryCredentials) UnmarshalJSON(b []byte) error { @@ -5295,7 +5295,7 @@ type TriggeredUpdateStatus struct { // Progress of the active data synchronization pipeline. TriggeredUpdateProgress *PipelineProgress `json:"triggered_update_progress,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TriggeredUpdateStatus) UnmarshalJSON(b []byte) error { @@ -5371,7 +5371,7 @@ type UpdateCatalog struct { // A map of key-value properties attached to the securable. Properties map[string]string `json:"properties,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateCatalog) UnmarshalJSON(b []byte) error { @@ -5392,7 +5392,7 @@ type UpdateConnection struct { // Username of current owner of the connection. Owner string `json:"owner,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateConnection) UnmarshalJSON(b []byte) error { @@ -5436,7 +5436,7 @@ type UpdateCredentialRequest struct { // credential. 
SkipValidation bool `json:"skip_validation,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateCredentialRequest) UnmarshalJSON(b []byte) error { @@ -5479,7 +5479,7 @@ type UpdateExternalLocation struct { // Path URL of the external location. Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateExternalLocation) UnmarshalJSON(b []byte) error { @@ -5497,7 +5497,7 @@ type UpdateFunction struct { // Username of current owner of function. Owner string `json:"owner,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateFunction) UnmarshalJSON(b []byte) error { @@ -5528,7 +5528,7 @@ type UpdateMetastore struct { // UUID of storage credential to access the metastore storage_root. StorageRootCredentialId string `json:"storage_root_credential_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateMetastore) UnmarshalJSON(b []byte) error { @@ -5549,7 +5549,7 @@ type UpdateMetastoreAssignment struct { // A workspace ID. WorkspaceId int64 `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateMetastoreAssignment) UnmarshalJSON(b []byte) error { @@ -5596,7 +5596,7 @@ type UpdateModelVersionRequest struct { // The integer version number of the model version Version int `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateModelVersionRequest) UnmarshalJSON(b []byte) error { @@ -5642,7 +5642,7 @@ type UpdateMonitor struct { // Configuration for monitoring time series tables. 
TimeSeries *MonitorTimeSeries `json:"time_series,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateMonitor) UnmarshalJSON(b []byte) error { @@ -5672,7 +5672,7 @@ type UpdateRegisteredModelRequest struct { // The identifier of the user who owns the registered model Owner string `json:"owner,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateRegisteredModelRequest) UnmarshalJSON(b []byte) error { @@ -5701,7 +5701,7 @@ type UpdateSchema struct { // A map of key-value properties attached to the securable. Properties map[string]string `json:"properties,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateSchema) UnmarshalJSON(b []byte) error { @@ -5742,7 +5742,7 @@ type UpdateStorageCredential struct { // credential. SkipValidation bool `json:"skip_validation,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateStorageCredential) UnmarshalJSON(b []byte) error { @@ -5760,7 +5760,7 @@ type UpdateTableRequest struct { Owner string `json:"owner,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateTableRequest) UnmarshalJSON(b []byte) error { @@ -5781,7 +5781,7 @@ type UpdateVolumeRequestContent struct { // The identifier of the user who owns the volume Owner string `json:"owner,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateVolumeRequestContent) UnmarshalJSON(b []byte) error { @@ -5833,7 +5833,7 @@ type ValidateCredentialRequest struct { // **STORAGE**. 
Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ValidateCredentialRequest) UnmarshalJSON(b []byte) error { @@ -5851,7 +5851,7 @@ type ValidateCredentialResponse struct { // The results of the validation check. Results []CredentialValidationResult `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ValidateCredentialResponse) UnmarshalJSON(b []byte) error { @@ -5912,7 +5912,7 @@ type ValidateStorageCredential struct { // The external location url to validate. Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ValidateStorageCredential) UnmarshalJSON(b []byte) error { @@ -5929,7 +5929,7 @@ type ValidateStorageCredentialResponse struct { // The results of the validation check. Results []ValidationResult `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ValidateStorageCredentialResponse) UnmarshalJSON(b []byte) error { @@ -5948,7 +5948,7 @@ type ValidationResult struct { // The results of the tested operation. 
Result ValidationResultResult `json:"result,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ValidationResult) UnmarshalJSON(b []byte) error { @@ -6061,7 +6061,7 @@ type VolumeInfo struct { VolumeType VolumeType `json:"volume_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *VolumeInfo) UnmarshalJSON(b []byte) error { @@ -6104,7 +6104,7 @@ type WorkspaceBinding struct { WorkspaceId int64 `json:"workspace_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WorkspaceBinding) UnmarshalJSON(b []byte) error { @@ -6151,7 +6151,7 @@ type WorkspaceBindingsResponse struct { // request (for the next page of results). NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WorkspaceBindingsResponse) UnmarshalJSON(b []byte) error { diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index 44aeb6ae6..4e49f301b 100755 --- a/service/cleanrooms/model.go +++ b/service/cleanrooms/model.go @@ -44,7 +44,7 @@ type CleanRoom struct { // When the clean room was last updated, in epoch milliseconds. 
UpdatedAt int64 `json:"updated_at,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoom) UnmarshalJSON(b []byte) error { @@ -125,7 +125,7 @@ type CleanRoomAsset struct { // if and only if **asset_type** is **VOLUME** VolumeLocalDetails *CleanRoomAssetVolumeLocalDetails `json:"volume_local_details,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomAsset) UnmarshalJSON(b []byte) error { @@ -179,7 +179,7 @@ type CleanRoomAssetForeignTableLocalDetails struct { // metastore, in the format of *catalog*.*schema*.*foreign_table_name* LocalName string `json:"local_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomAssetForeignTableLocalDetails) UnmarshalJSON(b []byte) error { @@ -197,7 +197,7 @@ type CleanRoomAssetNotebook struct { // as returned by :method:workspace/export with the format of **HTML**. NotebookContent string `json:"notebook_content,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomAssetNotebook) UnmarshalJSON(b []byte) error { @@ -249,7 +249,7 @@ type CleanRoomAssetTableLocalDetails struct { // Partition filtering specification for a shared table. 
Partitions []sharing.PartitionSpecificationPartition `json:"partitions,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomAssetTableLocalDetails) UnmarshalJSON(b []byte) error { @@ -270,7 +270,7 @@ type CleanRoomAssetViewLocalDetails struct { // the format of *catalog*.*schema*.*view_name* LocalName string `json:"local_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomAssetViewLocalDetails) UnmarshalJSON(b []byte) error { @@ -286,7 +286,7 @@ type CleanRoomAssetVolumeLocalDetails struct { // the format of *catalog*.*schema*.*volume_name* LocalName string `json:"local_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomAssetVolumeLocalDetails) UnmarshalJSON(b []byte) error { @@ -329,7 +329,7 @@ type CleanRoomCollaborator struct { // configured in the metastore OrganizationName string `json:"organization_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomCollaborator) UnmarshalJSON(b []byte) error { @@ -362,7 +362,7 @@ type CleanRoomNotebookTaskRun struct { // When the task run started, in epoch milliseconds. StartTime int64 `json:"start_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomNotebookTaskRun) UnmarshalJSON(b []byte) error { @@ -382,7 +382,7 @@ type CleanRoomOutputCatalog struct { Status CleanRoomOutputCatalogOutputCatalogStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomOutputCatalog) UnmarshalJSON(b []byte) error { @@ -446,7 +446,7 @@ type CleanRoomRemoteDetail struct { // Region of the central clean room. 
Region string `json:"region,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomRemoteDetail) UnmarshalJSON(b []byte) error { @@ -500,7 +500,7 @@ type CollaboratorJobRunInfo struct { // ID of the collaborator's workspace that triggered the task run. CollaboratorWorkspaceId int64 `json:"collaborator_workspace_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CollaboratorJobRunInfo) UnmarshalJSON(b []byte) error { @@ -520,7 +520,7 @@ type ComplianceSecurityProfile struct { // Whether the compliance security profile is enabled. IsEnabled bool `json:"is_enabled,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ComplianceSecurityProfile) UnmarshalJSON(b []byte) error { @@ -604,7 +604,7 @@ type ListCleanRoomAssetsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCleanRoomAssetsRequest) UnmarshalJSON(b []byte) error { @@ -623,7 +623,7 @@ type ListCleanRoomAssetsResponse struct { // (for the next page of results). NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCleanRoomAssetsResponse) UnmarshalJSON(b []byte) error { @@ -645,7 +645,7 @@ type ListCleanRoomNotebookTaskRunsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCleanRoomNotebookTaskRunsRequest) UnmarshalJSON(b []byte) error { @@ -664,7 +664,7 @@ type ListCleanRoomNotebookTaskRunsResponse struct { // Name of the clean room. 
Runs []CleanRoomNotebookTaskRun `json:"runs,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCleanRoomNotebookTaskRunsResponse) UnmarshalJSON(b []byte) error { @@ -683,7 +683,7 @@ type ListCleanRoomsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCleanRoomsRequest) UnmarshalJSON(b []byte) error { @@ -701,7 +701,7 @@ type ListCleanRoomsResponse struct { // (for the next page of results). NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCleanRoomsResponse) UnmarshalJSON(b []byte) error { diff --git a/service/compute/model.go b/service/compute/model.go index 4bb1066a4..6aa55693f 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -36,7 +36,7 @@ type AddInstanceProfile struct { // forcibly add the instance profile. SkipValidation bool `json:"skip_validation,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AddInstanceProfile) UnmarshalJSON(b []byte) error { @@ -66,7 +66,7 @@ type AutoScale struct { // have after creation. MinWorkers int `json:"min_workers,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AutoScale) UnmarshalJSON(b []byte) error { @@ -162,7 +162,7 @@ type AwsAttributes struct { // as the default value can be found by using the `List Zones` method. ZoneId string `json:"zone_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AwsAttributes) UnmarshalJSON(b []byte) error { @@ -232,7 +232,7 @@ type AzureAttributes struct { // availability. Further, the value should > 0 or -1. 
SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AzureAttributes) UnmarshalJSON(b []byte) error { @@ -282,7 +282,7 @@ type CancelCommand struct { ContextId string `json:"contextId,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CancelCommand) UnmarshalJSON(b []byte) error { @@ -312,7 +312,7 @@ type ClientsTypes struct { // With notebooks set, this cluster can be used for notebooks Notebooks bool `json:"notebooks,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClientsTypes) UnmarshalJSON(b []byte) error { @@ -369,7 +369,7 @@ type ClusterAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -392,7 +392,7 @@ type ClusterAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -562,7 +562,7 @@ type ClusterAttributes struct { WorkloadType *WorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterAttributes) UnmarshalJSON(b []byte) error { @@ -585,7 +585,7 @@ type ClusterCompliance struct { // policy validation error. 
Violations map[string]string `json:"violations,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterCompliance) UnmarshalJSON(b []byte) error { @@ -838,7 +838,7 @@ type ClusterDetails struct { WorkloadType *WorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterDetails) UnmarshalJSON(b []byte) error { @@ -863,7 +863,7 @@ type ClusterEvent struct { Type EventType `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterEvent) UnmarshalJSON(b []byte) error { @@ -880,7 +880,7 @@ type ClusterLibraryStatuses struct { // Status of all libraries on the cluster. LibraryStatuses []LibraryFullStatus `json:"library_statuses,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterLibraryStatuses) UnmarshalJSON(b []byte) error { @@ -913,7 +913,7 @@ type ClusterPermission struct { // Permission level PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPermission) UnmarshalJSON(b []byte) error { @@ -961,7 +961,7 @@ type ClusterPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPermissions) UnmarshalJSON(b []byte) error { @@ -977,7 +977,7 @@ type ClusterPermissionsDescription struct { // Permission level PermissionLevel ClusterPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -1004,7 +1004,7 @@ type ClusterPolicyAccessControlRequest struct { // name of the user UserName string 
`json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPolicyAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -1027,7 +1027,7 @@ type ClusterPolicyAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPolicyAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -1045,7 +1045,7 @@ type ClusterPolicyPermission struct { // Permission level PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPolicyPermission) UnmarshalJSON(b []byte) error { @@ -1089,7 +1089,7 @@ type ClusterPolicyPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPolicyPermissions) UnmarshalJSON(b []byte) error { @@ -1105,7 +1105,7 @@ type ClusterPolicyPermissionsDescription struct { // Permission level PermissionLevel ClusterPolicyPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterPolicyPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -1138,7 +1138,7 @@ type ClusterSettingsChange struct { // reading the settings field in the API response. PreviousValue string `json:"previous_value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterSettingsChange) UnmarshalJSON(b []byte) error { @@ -1166,7 +1166,7 @@ type ClusterSize struct { // increase from 5 to 10 as the new nodes are provisioned. 
NumWorkers int `json:"num_workers,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterSize) UnmarshalJSON(b []byte) error { @@ -1395,7 +1395,7 @@ type ClusterSpec struct { WorkloadType *WorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterSpec) UnmarshalJSON(b []byte) error { @@ -1422,7 +1422,7 @@ type Command struct { Language Language `json:"language,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Command) UnmarshalJSON(b []byte) error { @@ -1484,7 +1484,7 @@ type CommandStatusResponse struct { Status CommandStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CommandStatusResponse) UnmarshalJSON(b []byte) error { @@ -1536,7 +1536,7 @@ type ContextStatusResponse struct { Status ContextStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ContextStatusResponse) UnmarshalJSON(b []byte) error { @@ -1728,7 +1728,7 @@ type CreateCluster struct { WorkloadType *WorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCluster) UnmarshalJSON(b []byte) error { @@ -1742,7 +1742,7 @@ func (s CreateCluster) MarshalJSON() ([]byte, error) { type CreateClusterResponse struct { ClusterId string `json:"cluster_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateClusterResponse) UnmarshalJSON(b []byte) error { @@ -1759,7 +1759,7 @@ type CreateContext struct { Language Language `json:"language,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateContext) UnmarshalJSON(b []byte) error { @@ -1826,7 +1826,7 @@ type 
CreateInstancePool struct { // :method:clusters/sparkVersions API call. PreloadedSparkVersions []string `json:"preloaded_spark_versions,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateInstancePool) UnmarshalJSON(b []byte) error { @@ -1841,7 +1841,7 @@ type CreateInstancePoolResponse struct { // The ID of the created instance pool. InstancePoolId string `json:"instance_pool_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateInstancePoolResponse) UnmarshalJSON(b []byte) error { @@ -1887,7 +1887,7 @@ type CreatePolicy struct { // definition. PolicyFamilyId string `json:"policy_family_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePolicy) UnmarshalJSON(b []byte) error { @@ -1902,7 +1902,7 @@ type CreatePolicyResponse struct { // Canonical unique identifier for the cluster policy. PolicyId string `json:"policy_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePolicyResponse) UnmarshalJSON(b []byte) error { @@ -1917,7 +1917,7 @@ type CreateResponse struct { // The global init script ID. 
ScriptId string `json:"script_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateResponse) UnmarshalJSON(b []byte) error { @@ -1931,7 +1931,7 @@ func (s CreateResponse) MarshalJSON() ([]byte, error) { type Created struct { Id string `json:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Created) UnmarshalJSON(b []byte) error { @@ -1958,7 +1958,7 @@ type CustomPolicyTag struct { // (https://src.dev.databricks.com/databricks/universe@1647196627c8dc7b4152ad098a94b86484b93a6c/-/blob/cluster-common/conf/src/ClusterTagConstraints.scala?L24) Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CustomPolicyTag) UnmarshalJSON(b []byte) error { @@ -1979,7 +1979,7 @@ type DataPlaneEventDetails struct { // Timestamp int64 `json:"timestamp,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DataPlaneEventDetails) UnmarshalJSON(b []byte) error { @@ -2190,7 +2190,7 @@ type DiskSpec struct { // The type of disks that will be launched with this cluster. DiskType *DiskType `json:"disk_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DiskSpec) UnmarshalJSON(b []byte) error { @@ -2267,7 +2267,7 @@ type DockerBasicAuth struct { // Name of the user Username string `json:"username,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DockerBasicAuth) UnmarshalJSON(b []byte) error { @@ -2283,7 +2283,7 @@ type DockerImage struct { // URL of the docker image. 
Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DockerImage) UnmarshalJSON(b []byte) error { @@ -2502,7 +2502,7 @@ type EditCluster struct { WorkloadType *WorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EditCluster) UnmarshalJSON(b []byte) error { @@ -2549,7 +2549,7 @@ type EditInstancePool struct { // :method:clusters/listNodeTypes API call. NodeTypeId string `json:"node_type_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EditInstancePool) UnmarshalJSON(b []byte) error { @@ -2600,7 +2600,7 @@ type EditPolicy struct { // The ID of the policy to update. PolicyId string `json:"policy_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EditPolicy) UnmarshalJSON(b []byte) error { @@ -2624,7 +2624,7 @@ type EnforceClusterComplianceRequest struct { // compliance but does not update the cluster. ValidateOnly bool `json:"validate_only,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnforceClusterComplianceRequest) UnmarshalJSON(b []byte) error { @@ -2643,7 +2643,7 @@ type EnforceClusterComplianceResponse struct { // cluster to become compliant with its policy. HasChanges bool `json:"has_changes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnforceClusterComplianceResponse) UnmarshalJSON(b []byte) error { @@ -2722,7 +2722,7 @@ type EventDetails struct { // control plane.) User string `json:"user,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EventDetails) UnmarshalJSON(b []byte) error { @@ -2879,7 +2879,7 @@ type GcpAttributes struct { // https://cloud.google.com/compute/docs/regions-zones. 
ZoneId string `json:"zone_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GcpAttributes) UnmarshalJSON(b []byte) error { @@ -2944,7 +2944,7 @@ type GetClusterComplianceResponse struct { // policy validation error. Violations map[string]string `json:"violations,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetClusterComplianceResponse) UnmarshalJSON(b []byte) error { @@ -3023,7 +3023,7 @@ type GetEvents struct { // from the beginning of time. StartTime int64 `json:"start_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetEvents) UnmarshalJSON(b []byte) error { @@ -3072,7 +3072,7 @@ type GetEventsResponse struct { // event_types. TotalCount int64 `json:"total_count,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetEventsResponse) UnmarshalJSON(b []byte) error { @@ -3164,7 +3164,7 @@ type GetInstancePool struct { // Status of failed pending instances in the pool. Status *InstancePoolStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetInstancePool) UnmarshalJSON(b []byte) error { @@ -3206,7 +3206,7 @@ type GetPolicyFamilyRequest struct { // version. Version int64 `json:"-" url:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetPolicyFamilyRequest) UnmarshalJSON(b []byte) error { @@ -3242,7 +3242,7 @@ type GlobalInitScriptCreateRequest struct { // The Base64-encoded content of the script. 
Script string `json:"script"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GlobalInitScriptCreateRequest) UnmarshalJSON(b []byte) error { @@ -3274,7 +3274,7 @@ type GlobalInitScriptDetails struct { // The username of the user who last updated the script UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GlobalInitScriptDetails) UnmarshalJSON(b []byte) error { @@ -3308,7 +3308,7 @@ type GlobalInitScriptDetailsWithContent struct { // The username of the user who last updated the script UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GlobalInitScriptDetailsWithContent) UnmarshalJSON(b []byte) error { @@ -3342,7 +3342,7 @@ type GlobalInitScriptUpdateRequest struct { // The ID of the global init script. ScriptId string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GlobalInitScriptUpdateRequest) UnmarshalJSON(b []byte) error { @@ -3361,7 +3361,7 @@ type InitScriptEventDetails struct { // The private ip address of the node where the init scripts were run. 
ReportedForNode string `json:"reported_for_node,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InitScriptEventDetails) UnmarshalJSON(b []byte) error { @@ -3380,7 +3380,7 @@ type InitScriptExecutionDetails struct { // The current status of the script Status InitScriptExecutionDetailsStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InitScriptExecutionDetails) UnmarshalJSON(b []byte) error { @@ -3482,7 +3482,7 @@ type InstancePoolAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -3505,7 +3505,7 @@ type InstancePoolAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -3591,7 +3591,7 @@ type InstancePoolAndStats struct { // Status of failed pending instances in the pool. Status *InstancePoolStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolAndStats) UnmarshalJSON(b []byte) error { @@ -3633,7 +3633,7 @@ type InstancePoolAwsAttributes struct { // can be found by using the `List Zones` method. ZoneId string `json:"zone_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolAwsAttributes) UnmarshalJSON(b []byte) error { @@ -3685,7 +3685,7 @@ type InstancePoolAzureAttributes struct { // CommonConf.defaultSpotBidMaxPrice. 
SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolAzureAttributes) UnmarshalJSON(b []byte) error { @@ -3757,7 +3757,7 @@ type InstancePoolGcpAttributes struct { // on. ZoneId string `json:"zone_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolGcpAttributes) UnmarshalJSON(b []byte) error { @@ -3775,7 +3775,7 @@ type InstancePoolPermission struct { // Permission level PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolPermission) UnmarshalJSON(b []byte) error { @@ -3821,7 +3821,7 @@ type InstancePoolPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolPermissions) UnmarshalJSON(b []byte) error { @@ -3837,7 +3837,7 @@ type InstancePoolPermissionsDescription struct { // Permission level PermissionLevel InstancePoolPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -3894,7 +3894,7 @@ type InstancePoolStats struct { // Number of active instances in the pool that are part of a cluster. UsedCount int `json:"used_count,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstancePoolStats) UnmarshalJSON(b []byte) error { @@ -3933,7 +3933,7 @@ type InstanceProfile struct { // is optional, the default value is `false`. 
IsMetaInstanceProfile bool `json:"is_meta_instance_profile,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstanceProfile) UnmarshalJSON(b []byte) error { @@ -4040,7 +4040,7 @@ type Library struct { // cluster with an IAM role to access the S3 URI. Whl string `json:"whl,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Library) UnmarshalJSON(b []byte) error { @@ -4064,7 +4064,7 @@ type LibraryFullStatus struct { // Status of installing the library on the cluster. Status LibraryInstallStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LibraryFullStatus) UnmarshalJSON(b []byte) error { @@ -4127,7 +4127,7 @@ type ListAvailableZonesResponse struct { // The list of available zones (e.g., ['us-west-2c', 'us-east-2']). Zones []string `json:"zones,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAvailableZonesResponse) UnmarshalJSON(b []byte) error { @@ -4150,7 +4150,7 @@ type ListClusterCompliancesRequest struct { // Canonical unique identifier for the cluster policy. PolicyId string `json:"-" url:"policy_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListClusterCompliancesRequest) UnmarshalJSON(b []byte) error { @@ -4172,7 +4172,7 @@ type ListClusterCompliancesResponse struct { // request. PrevPageToken string `json:"prev_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListClusterCompliancesResponse) UnmarshalJSON(b []byte) error { @@ -4204,7 +4204,7 @@ type ListClustersFilterBy struct { // The ID of the cluster policy used to create the cluster if applicable. 
PolicyId string `json:"policy_id,omitempty" url:"policy_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListClustersFilterBy) UnmarshalJSON(b []byte) error { @@ -4229,7 +4229,7 @@ type ListClustersRequest struct { // Sort the list of clusters by a specific criteria. SortBy *ListClustersSortBy `json:"-" url:"sort_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListClustersRequest) UnmarshalJSON(b []byte) error { @@ -4251,7 +4251,7 @@ type ListClustersResponse struct { // request. PrevPageToken string `json:"prev_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListClustersResponse) UnmarshalJSON(b []byte) error { @@ -4359,7 +4359,7 @@ type ListPolicyFamiliesRequest struct { // A token that can be used to get the next page of results. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPolicyFamiliesRequest) UnmarshalJSON(b []byte) error { @@ -4377,7 +4377,7 @@ type ListPolicyFamiliesResponse struct { // List of policy families. PolicyFamilies []PolicyFamily `json:"policy_families,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPolicyFamiliesResponse) UnmarshalJSON(b []byte) error { @@ -4454,7 +4454,7 @@ type LogAnalyticsInfo struct { // LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogAnalyticsInfo) UnmarshalJSON(b []byte) error { @@ -4473,7 +4473,7 @@ type LogSyncStatus struct { // the response) if there is no exception in last attempted. 
LastException string `json:"last_exception,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogSyncStatus) UnmarshalJSON(b []byte) error { @@ -4497,7 +4497,7 @@ type MavenLibrary struct { // Central Repository and Spark Packages are searched. Repo string `json:"repo,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MavenLibrary) UnmarshalJSON(b []byte) error { @@ -4519,7 +4519,7 @@ type NodeInstanceType struct { LocalNvmeDisks int `json:"local_nvme_disks,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NodeInstanceType) UnmarshalJSON(b []byte) error { @@ -4579,7 +4579,7 @@ type NodeType struct { // with elastic disk enabled. This is true for most node types. SupportsElasticDisk bool `json:"supports_elastic_disk,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NodeType) UnmarshalJSON(b []byte) error { @@ -4595,7 +4595,7 @@ type PendingInstanceError struct { Message string `json:"message,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PendingInstanceError) UnmarshalJSON(b []byte) error { @@ -4670,7 +4670,7 @@ type Policy struct { // Canonical unique identifier for the Cluster Policy. PolicyId string `json:"policy_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Policy) UnmarshalJSON(b []byte) error { @@ -4694,7 +4694,7 @@ type PolicyFamily struct { // Unique identifier for the policy family. PolicyFamilyId string `json:"policy_family_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PolicyFamily) UnmarshalJSON(b []byte) error { @@ -4714,7 +4714,7 @@ type PythonPyPiLibrary struct { // default pip index is used. 
Repo string `json:"repo,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PythonPyPiLibrary) UnmarshalJSON(b []byte) error { @@ -4732,7 +4732,7 @@ type RCranLibrary struct { // default CRAN repo is used. Repo string `json:"repo,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RCranLibrary) UnmarshalJSON(b []byte) error { @@ -4770,7 +4770,7 @@ type ResizeCluster struct { // increase from 5 to 10 as the new nodes are provisioned. NumWorkers int `json:"num_workers,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResizeCluster) UnmarshalJSON(b []byte) error { @@ -4790,7 +4790,7 @@ type RestartCluster struct { // RestartUser string `json:"restart_user,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RestartCluster) UnmarshalJSON(b []byte) error { @@ -4860,7 +4860,7 @@ type Results struct { // true if partial results are returned. Truncated bool `json:"truncated,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Results) UnmarshalJSON(b []byte) error { @@ -4940,7 +4940,7 @@ type S3StorageInfo struct { // If both are set, endpoint will be used. Region string `json:"region,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *S3StorageInfo) UnmarshalJSON(b []byte) error { @@ -4978,7 +4978,7 @@ type SparkNode struct { // creation timestamp in the database. StartTimestamp int64 `json:"start_timestamp,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SparkNode) UnmarshalJSON(b []byte) error { @@ -4993,7 +4993,7 @@ type SparkNodeAwsAttributes struct { // Whether this node is on an Amazon spot instance. 
IsSpot bool `json:"is_spot,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SparkNodeAwsAttributes) UnmarshalJSON(b []byte) error { @@ -5014,7 +5014,7 @@ type SparkVersion struct { // A descriptive name for this Spark version, for example "Spark 2.1". Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SparkVersion) UnmarshalJSON(b []byte) error { @@ -5502,7 +5502,7 @@ type UpdateClusterResource struct { WorkloadType *WorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateClusterResource) UnmarshalJSON(b []byte) error { diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 08f320ed5..563fed5fa 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -106,7 +106,7 @@ type Dashboard struct { // The warehouse ID used to run the dashboard. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Dashboard) UnmarshalJSON(b []byte) error { @@ -209,7 +209,7 @@ type DeleteScheduleRequest struct { // UUID identifying the schedule. ScheduleId string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteScheduleRequest) UnmarshalJSON(b []byte) error { @@ -235,7 +235,7 @@ type DeleteSubscriptionRequest struct { // UUID identifying the subscription. 
SubscriptionId string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteSubscriptionRequest) UnmarshalJSON(b []byte) error { @@ -269,7 +269,7 @@ type ExecutePublishedDashboardQueryRequest struct { // processing the published dashboard queries OverrideWarehouseId string `json:"override_warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExecutePublishedDashboardQueryRequest) UnmarshalJSON(b []byte) error { @@ -304,7 +304,7 @@ type GenieConversation struct { // ID of the user who created the conversation UserId int `json:"user_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GenieConversation) UnmarshalJSON(b []byte) error { @@ -415,7 +415,7 @@ type GenieMessage struct { // ID of the user who created the message UserId int64 `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GenieMessage) UnmarshalJSON(b []byte) error { @@ -523,7 +523,7 @@ type ListDashboardsRequest struct { // `DASHBOARD_VIEW_BASIC`only includes summary metadata from the dashboard. View DashboardView `json:"-" url:"view,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListDashboardsRequest) UnmarshalJSON(b []byte) error { @@ -540,7 +540,7 @@ type ListDashboardsResponse struct { // this field is omitted, there are no subsequent dashboards. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListDashboardsResponse) UnmarshalJSON(b []byte) error { @@ -561,7 +561,7 @@ type ListSchedulesRequest struct { // retrieve the subsequent page. 
PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSchedulesRequest) UnmarshalJSON(b []byte) error { @@ -580,7 +580,7 @@ type ListSchedulesResponse struct { Schedules []Schedule `json:"schedules,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSchedulesResponse) UnmarshalJSON(b []byte) error { @@ -603,7 +603,7 @@ type ListSubscriptionsRequest struct { // UUID identifying the schedule which the subscriptions belongs. ScheduleId string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSubscriptionsRequest) UnmarshalJSON(b []byte) error { @@ -622,7 +622,7 @@ type ListSubscriptionsResponse struct { Subscriptions []Subscription `json:"subscriptions,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSubscriptionsResponse) UnmarshalJSON(b []byte) error { @@ -638,7 +638,7 @@ type MessageError struct { Type MessageErrorType `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MessageError) UnmarshalJSON(b []byte) error { @@ -843,7 +843,7 @@ type MigrateDashboardRequest struct { // dashboard. UpdateParameterSyntax bool `json:"update_parameter_syntax,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MigrateDashboardRequest) UnmarshalJSON(b []byte) error { @@ -889,7 +889,7 @@ type PublishRequest struct { // was set in the draft. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PublishRequest) UnmarshalJSON(b []byte) error { @@ -910,7 +910,7 @@ type PublishedDashboard struct { // The warehouse ID used to run the published dashboard. 
WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PublishedDashboard) UnmarshalJSON(b []byte) error { @@ -942,7 +942,7 @@ type QueryAttachment struct { // Name of the query Title string `json:"title,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryAttachment) UnmarshalJSON(b []byte) error { @@ -970,7 +970,7 @@ type QueryResponseStatus struct { Success *SuccessStatus `json:"success,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryResponseStatus) UnmarshalJSON(b []byte) error { @@ -988,7 +988,7 @@ type QuerySchema struct { // re-executed. StatementId string `json:"statement_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QuerySchema) UnmarshalJSON(b []byte) error { @@ -1019,7 +1019,7 @@ type Result struct { // full result data. StatementId string `json:"statement_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Result) UnmarshalJSON(b []byte) error { @@ -1053,7 +1053,7 @@ type Schedule struct { // The warehouse id to run the dashboard with for the schedule. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Schedule) UnmarshalJSON(b []byte) error { @@ -1122,7 +1122,7 @@ type Subscription struct { // A timestamp indicating when the subscription was last updated. 
UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Subscription) UnmarshalJSON(b []byte) error { @@ -1151,7 +1151,7 @@ type SuccessStatus struct { // Whether the query result is truncated (either by byte limit or row limit) Truncated bool `json:"truncated,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SuccessStatus) UnmarshalJSON(b []byte) error { @@ -1168,7 +1168,7 @@ type TextAttachment struct { Id string `json:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TextAttachment) UnmarshalJSON(b []byte) error { diff --git a/service/files/model.go b/service/files/model.go index 10ef77d13..71b3939f6 100755 --- a/service/files/model.go +++ b/service/files/model.go @@ -33,7 +33,7 @@ type Create struct { // The path of the new file. The path should be the absolute DBFS path. Path string `json:"path"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Create) UnmarshalJSON(b []byte) error { @@ -58,7 +58,7 @@ type CreateResponse struct { // calls when writing to a file through a stream. Handle int64 `json:"handle,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateResponse) UnmarshalJSON(b []byte) error { @@ -77,7 +77,7 @@ type Delete struct { // empty directories can be done without providing the recursive flag. Recursive bool `json:"recursive,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Delete) UnmarshalJSON(b []byte) error { @@ -119,7 +119,7 @@ type DirectoryEntry struct { // The absolute path of the file or directory. 
Path string `json:"path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DirectoryEntry) UnmarshalJSON(b []byte) error { @@ -145,7 +145,7 @@ type DownloadResponse struct { LastModified string `json:"-" url:"-" header:"last-modified,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DownloadResponse) UnmarshalJSON(b []byte) error { @@ -166,7 +166,7 @@ type FileInfo struct { // The absolute path of the file or directory. Path string `json:"path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FileInfo) UnmarshalJSON(b []byte) error { @@ -199,7 +199,7 @@ type GetMetadataResponse struct { LastModified string `json:"-" url:"-" header:"last-modified,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetMetadataResponse) UnmarshalJSON(b []byte) error { @@ -249,7 +249,7 @@ type ListDirectoryContentsRequest struct { // must not be used to determine when the listing is complete. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListDirectoryContentsRequest) UnmarshalJSON(b []byte) error { @@ -266,7 +266,7 @@ type ListDirectoryResponse struct { // A token, which can be sent as `page_token` to retrieve the next page. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListDirectoryResponse) UnmarshalJSON(b []byte) error { @@ -311,7 +311,7 @@ type Put struct { // The path of the new file. The path should be the absolute DBFS path. 
Path string `json:"path"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Put) UnmarshalJSON(b []byte) error { @@ -335,7 +335,7 @@ type ReadDbfsRequest struct { // The path of the file to read. The path should be the absolute DBFS path. Path string `json:"-" url:"path"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ReadDbfsRequest) UnmarshalJSON(b []byte) error { @@ -354,7 +354,7 @@ type ReadResponse struct { // The base64-encoded contents of the file read. Data string `json:"data,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ReadResponse) UnmarshalJSON(b []byte) error { @@ -373,7 +373,7 @@ type UploadRequest struct { // If true, an existing file will be overwritten. Overwrite bool `json:"-" url:"overwrite,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UploadRequest) UnmarshalJSON(b []byte) error { diff --git a/service/iam/model.go b/service/iam/model.go index 1dbdcb937..811e50611 100755 --- a/service/iam/model.go +++ b/service/iam/model.go @@ -18,7 +18,7 @@ type AccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AccessControlRequest) UnmarshalJSON(b []byte) error { @@ -41,7 +41,7 @@ type AccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AccessControlResponse) UnmarshalJSON(b []byte) error { @@ -58,7 +58,7 @@ func (s AccessControlResponse) MarshalJSON() ([]byte, error) { type Actor struct { ActorId int64 `json:"actor_id,omitempty" url:"actor_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Actor) UnmarshalJSON(b 
[]byte) error { @@ -92,7 +92,7 @@ type CheckPolicyResponse struct { IsPermitted bool `json:"is_permitted,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CheckPolicyResponse) UnmarshalJSON(b []byte) error { @@ -114,7 +114,7 @@ type ComplexValue struct { Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ComplexValue) UnmarshalJSON(b []byte) error { @@ -217,7 +217,7 @@ type GetAccountUserRequest struct { // Specifies the index of the first result. First item is number 1. StartIndex int `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetAccountUserRequest) UnmarshalJSON(b []byte) error { @@ -348,7 +348,7 @@ type GetUserRequest struct { // Specifies the index of the first result. First item is number 1. StartIndex int `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetUserRequest) UnmarshalJSON(b []byte) error { @@ -395,7 +395,7 @@ type Group struct { // The schema of the group. Schemas []GroupSchema `json:"schemas,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Group) UnmarshalJSON(b []byte) error { @@ -454,7 +454,7 @@ type ListAccountGroupsRequest struct { // Specifies the index of the first result. First item is number 1. StartIndex int64 `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAccountGroupsRequest) UnmarshalJSON(b []byte) error { @@ -488,7 +488,7 @@ type ListAccountServicePrincipalsRequest struct { // Specifies the index of the first result. First item is number 1. 
StartIndex int64 `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAccountServicePrincipalsRequest) UnmarshalJSON(b []byte) error { @@ -523,7 +523,7 @@ type ListAccountUsersRequest struct { // Specifies the index of the first result. First item is number 1. StartIndex int64 `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAccountUsersRequest) UnmarshalJSON(b []byte) error { @@ -557,7 +557,7 @@ type ListGroupsRequest struct { // Specifies the index of the first result. First item is number 1. StartIndex int64 `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListGroupsRequest) UnmarshalJSON(b []byte) error { @@ -581,7 +581,7 @@ type ListGroupsResponse struct { // Total results that match the request filters. TotalResults int64 `json:"totalResults,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListGroupsResponse) UnmarshalJSON(b []byte) error { @@ -630,7 +630,7 @@ type ListServicePrincipalResponse struct { // Total results that match the request filters. TotalResults int64 `json:"totalResults,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListServicePrincipalResponse) UnmarshalJSON(b []byte) error { @@ -664,7 +664,7 @@ type ListServicePrincipalsRequest struct { // Specifies the index of the first result. First item is number 1. StartIndex int64 `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListServicePrincipalsRequest) UnmarshalJSON(b []byte) error { @@ -726,7 +726,7 @@ type ListUsersRequest struct { // Specifies the index of the first result. First item is number 1. 
StartIndex int64 `json:"-" url:"startIndex,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListUsersRequest) UnmarshalJSON(b []byte) error { @@ -750,7 +750,7 @@ type ListUsersResponse struct { // Total results that match the request filters. TotalResults int64 `json:"totalResults,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListUsersResponse) UnmarshalJSON(b []byte) error { @@ -778,7 +778,7 @@ type MigratePermissionsRequest struct { // will occur. WorkspaceId int64 `json:"workspace_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MigratePermissionsRequest) UnmarshalJSON(b []byte) error { @@ -793,7 +793,7 @@ type MigratePermissionsResponse struct { // Number of permissions migrated. PermissionsMigrated int `json:"permissions_migrated,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MigratePermissionsResponse) UnmarshalJSON(b []byte) error { @@ -810,7 +810,7 @@ type Name struct { // Given name of the Databricks user. 
GivenName string `json:"givenName,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Name) UnmarshalJSON(b []byte) error { @@ -828,7 +828,7 @@ type ObjectPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ObjectPermissions) UnmarshalJSON(b []byte) error { @@ -859,7 +859,7 @@ type PasswordAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PasswordAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -882,7 +882,7 @@ type PasswordAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PasswordAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -900,7 +900,7 @@ type PasswordPermission struct { // Permission level PermissionLevel PasswordPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PasswordPermission) UnmarshalJSON(b []byte) error { @@ -944,7 +944,7 @@ type PasswordPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PasswordPermissions) UnmarshalJSON(b []byte) error { @@ -960,7 +960,7 @@ type PasswordPermissionsDescription struct { // Permission level PermissionLevel PasswordPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PasswordPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -983,7 +983,7 @@ type Patch struct { // Value to modify Value any `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + 
ForceSendFields []string `json:"-" url:"-"` } func (s *Patch) UnmarshalJSON(b []byte) error { @@ -1059,7 +1059,7 @@ type Permission struct { // Permission level PermissionLevel PermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Permission) UnmarshalJSON(b []byte) error { @@ -1080,7 +1080,7 @@ type PermissionAssignment struct { // Information about the principal assigned to the workspace. Principal *PrincipalOutput `json:"principal,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PermissionAssignment) UnmarshalJSON(b []byte) error { @@ -1160,7 +1160,7 @@ type PermissionOutput struct { PermissionLevel WorkspacePermission `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PermissionOutput) UnmarshalJSON(b []byte) error { @@ -1176,7 +1176,7 @@ type PermissionsDescription struct { // Permission level PermissionLevel PermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PermissionsDescription) UnmarshalJSON(b []byte) error { @@ -1213,7 +1213,7 @@ type PrincipalOutput struct { // The username of the user. Present only if the principal is a user. UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PrincipalOutput) UnmarshalJSON(b []byte) error { @@ -1262,7 +1262,7 @@ type ResourceInfo struct { // another parent. ParentResourceInfo *ResourceInfo `json:"parent_resource_info,omitempty" url:"parent_resource_info,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResourceInfo) UnmarshalJSON(b []byte) error { @@ -1278,7 +1278,7 @@ type ResourceMeta struct { // (`WorkspaceGroup`) or account group (`Group`). 
ResourceType string `json:"resourceType,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResourceMeta) UnmarshalJSON(b []byte) error { @@ -1302,7 +1302,7 @@ type RuleSetResponse struct { // Name of the rule set. Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RuleSetResponse) UnmarshalJSON(b []byte) error { @@ -1347,7 +1347,7 @@ type ServicePrincipal struct { // The schema of the List response. Schemas []ServicePrincipalSchema `json:"schemas,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServicePrincipal) UnmarshalJSON(b []byte) error { @@ -1440,7 +1440,7 @@ type User struct { // Email address of the Databricks user. UserName string `json:"userName,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *User) UnmarshalJSON(b []byte) error { diff --git a/service/jobs/model.go b/service/jobs/model.go index f2303f89b..86fb2be2c 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -34,7 +34,7 @@ type BaseJob struct { // using the `resetJob` method. Settings *JobSettings `json:"settings,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *BaseJob) UnmarshalJSON(b []byte) error { @@ -192,7 +192,7 @@ type BaseRun struct { // Additional details about what triggered the run TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *BaseRun) UnmarshalJSON(b []byte) error { @@ -210,7 +210,7 @@ type CancelAllRuns struct { // The canonical identifier of the job to cancel all runs of. 
JobId int64 `json:"job_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CancelAllRuns) UnmarshalJSON(b []byte) error { @@ -350,7 +350,7 @@ type CleanRoomsNotebookTask struct { // Name of the notebook being run. NotebookName string `json:"notebook_name"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CleanRoomsNotebookTask) UnmarshalJSON(b []byte) error { @@ -390,7 +390,7 @@ type ClusterInstance struct { // available yet. SparkContextId string `json:"spark_context_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterInstance) UnmarshalJSON(b []byte) error { @@ -417,7 +417,7 @@ type ClusterSpec struct { // run. NewCluster *compute.ClusterSpec `json:"new_cluster,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterSpec) UnmarshalJSON(b []byte) error { @@ -638,7 +638,7 @@ type CreateJob struct { // begin or complete. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateJob) UnmarshalJSON(b []byte) error { @@ -654,7 +654,7 @@ type CreateResponse struct { // The canonical identifier for the newly created job. JobId int64 `json:"job_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateResponse) UnmarshalJSON(b []byte) error { @@ -689,7 +689,7 @@ type DbtOutput struct { // after the run has finished. ArtifactsLink string `json:"artifacts_link,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DbtOutput) UnmarshalJSON(b []byte) error { @@ -736,7 +736,7 @@ type DbtTask struct { // line argument. 
WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DbtTask) UnmarshalJSON(b []byte) error { @@ -780,7 +780,7 @@ type EnforcePolicyComplianceForJobResponseJobClusterSettingsChange struct { // reading the settings field in the API response. PreviousValue string `json:"previous_value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnforcePolicyComplianceForJobResponseJobClusterSettingsChange) UnmarshalJSON(b []byte) error { @@ -798,7 +798,7 @@ type EnforcePolicyComplianceRequest struct { // does not update the job. ValidateOnly bool `json:"validate_only,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnforcePolicyComplianceRequest) UnmarshalJSON(b []byte) error { @@ -825,7 +825,7 @@ type EnforcePolicyComplianceResponse struct { // requirements. Settings *JobSettings `json:"settings,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnforcePolicyComplianceResponse) UnmarshalJSON(b []byte) error { @@ -868,7 +868,7 @@ type FileArrivalTriggerConfiguration struct { // allowed value is 60 seconds. WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FileArrivalTriggerConfiguration) UnmarshalJSON(b []byte) error { @@ -897,7 +897,7 @@ type ForEachTask struct { // Configuration for the task that will be run for each element in the array Task Task `json:"task"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ForEachTask) UnmarshalJSON(b []byte) error { @@ -917,7 +917,7 @@ type ForEachTaskErrorMessageStats struct { // Describes the termination reason for the error message. 
TerminationCategory string `json:"termination_category,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ForEachTaskErrorMessageStats) UnmarshalJSON(b []byte) error { @@ -943,7 +943,7 @@ type ForEachTaskTaskRunStats struct { // Describes the length of the list of items to iterate over. TotalIterations int `json:"total_iterations,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ForEachTaskTaskRunStats) UnmarshalJSON(b []byte) error { @@ -1007,7 +1007,7 @@ type GetJobRequest struct { // next page of the job's sub-resources. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetJobRequest) UnmarshalJSON(b []byte) error { @@ -1037,7 +1037,7 @@ type GetPolicyComplianceResponse struct { // validation error. Violations map[string]string `json:"violations,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetPolicyComplianceResponse) UnmarshalJSON(b []byte) error { @@ -1067,7 +1067,7 @@ type GetRunRequest struct { // This field is required. RunId int64 `json:"-" url:"run_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetRunRequest) UnmarshalJSON(b []byte) error { @@ -1125,7 +1125,7 @@ type GitSnapshot struct { // was specified, this points to the commit the tag points to. UsedCommit string `json:"used_commit,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GitSnapshot) UnmarshalJSON(b []byte) error { @@ -1168,7 +1168,7 @@ type GitSource struct { // is source controlled. 
JobSource *JobSource `json:"job_source,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GitSource) UnmarshalJSON(b []byte) error { @@ -1215,7 +1215,7 @@ type Job struct { // using the `resetJob` method. Settings *JobSettings `json:"settings,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Job) UnmarshalJSON(b []byte) error { @@ -1236,7 +1236,7 @@ type JobAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -1259,7 +1259,7 @@ type JobAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -1291,7 +1291,7 @@ type JobCompliance struct { // validation error. Violations map[string]string `json:"violations,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobCompliance) UnmarshalJSON(b []byte) error { @@ -1310,7 +1310,7 @@ type JobDeployment struct { // Path of the file that contains deployment metadata. MetadataFilePath string `json:"metadata_file_path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobDeployment) UnmarshalJSON(b []byte) error { @@ -1416,7 +1416,7 @@ type JobEmailNotifications struct { // notifications are not sent. OnSuccess []string `json:"on_success,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobEmailNotifications) UnmarshalJSON(b []byte) error { @@ -1444,7 +1444,7 @@ type JobNotificationSettings struct { // `on_failure` if the run is skipped. 
NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobNotificationSettings) UnmarshalJSON(b []byte) error { @@ -1463,7 +1463,7 @@ type JobParameter struct { // The value used in the run Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobParameter) UnmarshalJSON(b []byte) error { @@ -1489,7 +1489,7 @@ type JobPermission struct { // Permission level PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobPermission) UnmarshalJSON(b []byte) error { @@ -1539,7 +1539,7 @@ type JobPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobPermissions) UnmarshalJSON(b []byte) error { @@ -1555,7 +1555,7 @@ type JobPermissionsDescription struct { // Permission level PermissionLevel JobPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -1585,7 +1585,7 @@ type JobRunAs struct { // field to their own email. UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobRunAs) UnmarshalJSON(b []byte) error { @@ -1706,7 +1706,7 @@ type JobSettings struct { // begin or complete. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobSettings) UnmarshalJSON(b []byte) error { @@ -1896,7 +1896,7 @@ type ListJobComplianceForPolicyResponse struct { // results for the request. 
PrevPageToken string `json:"prev_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListJobComplianceForPolicyResponse) UnmarshalJSON(b []byte) error { @@ -1919,7 +1919,7 @@ type ListJobComplianceRequest struct { // Canonical unique identifier for the cluster policy. PolicyId string `json:"-" url:"policy_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListJobComplianceRequest) UnmarshalJSON(b []byte) error { @@ -1949,7 +1949,7 @@ type ListJobsRequest struct { // request to list the next or previous page of jobs respectively. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListJobsRequest) UnmarshalJSON(b []byte) error { @@ -1974,7 +1974,7 @@ type ListJobsResponse struct { // applicable). PrevPageToken string `json:"prev_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListJobsResponse) UnmarshalJSON(b []byte) error { @@ -2026,7 +2026,7 @@ type ListRunsRequest struct { // filter by a time range. StartTimeTo int64 `json:"-" url:"start_time_to,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListRunsRequest) UnmarshalJSON(b []byte) error { @@ -2051,7 +2051,7 @@ type ListRunsResponse struct { // response if there are runs to list. Runs []BaseRun `json:"runs,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListRunsResponse) UnmarshalJSON(b []byte) error { @@ -2073,7 +2073,7 @@ type NotebookOutput struct { // Whether or not the result was truncated. 
Truncated bool `json:"truncated,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NotebookOutput) UnmarshalJSON(b []byte) error { @@ -2124,7 +2124,7 @@ type NotebookTask struct { // non-SQL cells, the run will fail. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NotebookTask) UnmarshalJSON(b []byte) error { @@ -2145,7 +2145,7 @@ type OutputSchemaInfo struct { SchemaName string `json:"schema_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *OutputSchemaInfo) UnmarshalJSON(b []byte) error { @@ -2254,7 +2254,7 @@ type PipelineParams struct { // If true, triggers a full refresh on the delta live table. FullRefresh bool `json:"full_refresh,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineParams) UnmarshalJSON(b []byte) error { @@ -2271,7 +2271,7 @@ type PipelineTask struct { // The full name of the pipeline task to execute. PipelineId string `json:"pipeline_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineTask) UnmarshalJSON(b []byte) error { @@ -2310,7 +2310,7 @@ type QueueDetails struct { // unstructured, and its exact format is subject to change. Message string `json:"message,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueueDetails) UnmarshalJSON(b []byte) error { @@ -2383,7 +2383,7 @@ type RepairHistoryItem struct { // or a repair run. Type RepairHistoryItemType `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepairHistoryItem) UnmarshalJSON(b []byte) error { @@ -2525,7 +2525,7 @@ type RepairRun struct { // does not support custom parameters. 
SqlParams map[string]string `json:"sql_params,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepairRun) UnmarshalJSON(b []byte) error { @@ -2542,7 +2542,7 @@ type RepairRunResponse struct { // `latest_repair_id` field to ensure sequential repairs. RepairId int64 `json:"repair_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepairRunResponse) UnmarshalJSON(b []byte) error { @@ -2572,7 +2572,7 @@ type ResolvedConditionTaskValues struct { Right string `json:"right,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResolvedConditionTaskValues) UnmarshalJSON(b []byte) error { @@ -2786,7 +2786,7 @@ type Run struct { // Additional details about what triggered the run TriggerInfo *TriggerInfo `json:"trigger_info,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Run) UnmarshalJSON(b []byte) error { @@ -2819,7 +2819,7 @@ type RunConditionTask struct { // a job state or parameter reference. Right string `json:"right"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunConditionTask) UnmarshalJSON(b []byte) error { @@ -2844,7 +2844,7 @@ type RunForEachTask struct { // Configuration for the task that will be run for each element in the array Task Task `json:"task"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunForEachTask) UnmarshalJSON(b []byte) error { @@ -2910,7 +2910,7 @@ type RunJobOutput struct { // The run id of the triggered job run RunId int64 `json:"run_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunJobOutput) UnmarshalJSON(b []byte) error { @@ -3230,7 +3230,7 @@ type RunNow struct { // does not support custom parameters. 
SqlParams map[string]string `json:"sql_params,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunNow) UnmarshalJSON(b []byte) error { @@ -3249,7 +3249,7 @@ type RunNowResponse struct { // The globally unique ID of the newly triggered run. RunId int64 `json:"run_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunNowResponse) UnmarshalJSON(b []byte) error { @@ -3301,7 +3301,7 @@ type RunOutput struct { // The output of a SQL task, if available. SqlOutput *SqlOutput `json:"sql_output,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunOutput) UnmarshalJSON(b []byte) error { @@ -3480,7 +3480,7 @@ type RunState struct { // the scheduler because the run timed out. UserCancelledOrTimedout bool `json:"user_cancelled_or_timedout,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunState) UnmarshalJSON(b []byte) error { @@ -3683,7 +3683,7 @@ type RunTask struct { // Task webhooks respect the task notification settings. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunTask) UnmarshalJSON(b []byte) error { @@ -3791,7 +3791,7 @@ type SparkJarTask struct { // Deprecated. A value of `false` is no longer supported. RunAsRepl bool `json:"run_as_repl,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SparkJarTask) UnmarshalJSON(b []byte) error { @@ -3855,7 +3855,7 @@ type SqlAlertOutput struct { // The canonical identifier of the SQL warehouse. 
WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlAlertOutput) UnmarshalJSON(b []byte) error { @@ -3906,7 +3906,7 @@ type SqlDashboardOutput struct { // Widgets executed in the run. Only SQL query based widgets are listed. Widgets []SqlDashboardWidgetOutput `json:"widgets,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlDashboardOutput) UnmarshalJSON(b []byte) error { @@ -3933,7 +3933,7 @@ type SqlDashboardWidgetOutput struct { // The title of the SQL widget. WidgetTitle string `json:"widget_title,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlDashboardWidgetOutput) UnmarshalJSON(b []byte) error { @@ -3990,7 +3990,7 @@ type SqlOutputError struct { // The error message when execution fails. Message string `json:"message,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlOutputError) UnmarshalJSON(b []byte) error { @@ -4013,7 +4013,7 @@ type SqlQueryOutput struct { // The canonical identifier of the SQL warehouse. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlQueryOutput) UnmarshalJSON(b []byte) error { @@ -4028,7 +4028,7 @@ type SqlStatementOutput struct { // A key that can be used to look up query details. LookupKey string `json:"lookup_key,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlStatementOutput) UnmarshalJSON(b []byte) error { @@ -4067,7 +4067,7 @@ type SqlTaskAlert struct { // If specified, alert notifications are sent to subscribers. 
Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlTaskAlert) UnmarshalJSON(b []byte) error { @@ -4089,7 +4089,7 @@ type SqlTaskDashboard struct { // If specified, dashboard snapshots are sent to subscriptions. Subscriptions []SqlTaskSubscription `json:"subscriptions,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlTaskDashboard) UnmarshalJSON(b []byte) error { @@ -4131,7 +4131,7 @@ type SqlTaskSubscription struct { // destination_id and user_name for subscription notifications. UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SqlTaskSubscription) UnmarshalJSON(b []byte) error { @@ -4203,7 +4203,7 @@ type SubmitRun struct { // completes. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SubmitRun) UnmarshalJSON(b []byte) error { @@ -4219,7 +4219,7 @@ type SubmitRunResponse struct { // The canonical identifier for the newly submitted run. RunId int64 `json:"run_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SubmitRunResponse) UnmarshalJSON(b []byte) error { @@ -4334,7 +4334,7 @@ type SubmitTask struct { // Task webhooks respect the task notification settings. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SubmitTask) UnmarshalJSON(b []byte) error { @@ -4361,7 +4361,7 @@ type TableUpdateTriggerConfiguration struct { // seconds. 
WaitAfterLastChangeSeconds int `json:"wait_after_last_change_seconds,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TableUpdateTriggerConfiguration) UnmarshalJSON(b []byte) error { @@ -4499,7 +4499,7 @@ type Task struct { // notifications. WebhookNotifications *WebhookNotifications `json:"webhook_notifications,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Task) UnmarshalJSON(b []byte) error { @@ -4517,7 +4517,7 @@ type TaskDependency struct { // The name of the task this task depends on. TaskKey string `json:"task_key"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TaskDependency) UnmarshalJSON(b []byte) error { @@ -4563,7 +4563,7 @@ type TaskEmailNotifications struct { // notifications are not sent. OnSuccess []string `json:"on_success,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TaskEmailNotifications) UnmarshalJSON(b []byte) error { @@ -4586,7 +4586,7 @@ type TaskNotificationSettings struct { // `on_failure` if the run is skipped. 
NoAlertForSkippedRuns bool `json:"no_alert_for_skipped_runs,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TaskNotificationSettings) UnmarshalJSON(b []byte) error { @@ -4815,7 +4815,7 @@ type TerminationDetails struct { // [status page]: https://status.databricks.com/ Type TerminationTypeType `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TerminationDetails) UnmarshalJSON(b []byte) error { @@ -4878,7 +4878,7 @@ type TriggerInfo struct { // The run id of the Run Job task run RunId int64 `json:"run_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TriggerInfo) UnmarshalJSON(b []byte) error { @@ -4991,7 +4991,7 @@ type ViewItem struct { // Type of the view item. Type ViewType `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ViewItem) UnmarshalJSON(b []byte) error { diff --git a/service/marketplace/model.go b/service/marketplace/model.go index d96905e74..1dda602a9 100755 --- a/service/marketplace/model.go +++ b/service/marketplace/model.go @@ -153,7 +153,7 @@ type ContactInfo struct { LastName string `json:"last_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ContactInfo) UnmarshalJSON(b []byte) error { @@ -198,7 +198,7 @@ type CreateExchangeFilterRequest struct { type CreateExchangeFilterResponse struct { FilterId string `json:"filter_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateExchangeFilterResponse) UnmarshalJSON(b []byte) error { @@ -216,7 +216,7 @@ type CreateExchangeRequest struct { type CreateExchangeResponse struct { ExchangeId string `json:"exchange_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s 
*CreateExchangeResponse) UnmarshalJSON(b []byte) error { @@ -236,7 +236,7 @@ type CreateFileRequest struct { MimeType string `json:"mime_type"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateFileRequest) UnmarshalJSON(b []byte) error { @@ -252,7 +252,7 @@ type CreateFileResponse struct { // Pre-signed POST URL to blob storage UploadUrl string `json:"upload_url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateFileResponse) UnmarshalJSON(b []byte) error { @@ -276,7 +276,7 @@ type CreateInstallationRequest struct { ShareName string `json:"share_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateInstallationRequest) UnmarshalJSON(b []byte) error { @@ -294,7 +294,7 @@ type CreateListingRequest struct { type CreateListingResponse struct { ListingId string `json:"listing_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateListingResponse) UnmarshalJSON(b []byte) error { @@ -325,7 +325,7 @@ type CreatePersonalizationRequest struct { RecipientType DeltaSharingRecipientType `json:"recipient_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePersonalizationRequest) UnmarshalJSON(b []byte) error { @@ -339,7 +339,7 @@ func (s CreatePersonalizationRequest) MarshalJSON() ([]byte, error) { type CreatePersonalizationRequestResponse struct { Id string `json:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePersonalizationRequestResponse) UnmarshalJSON(b []byte) error { @@ -357,7 +357,7 @@ type CreateProviderRequest struct { type CreateProviderResponse struct { Id string `json:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s 
*CreateProviderResponse) UnmarshalJSON(b []byte) error { @@ -511,7 +511,7 @@ type Exchange struct { UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Exchange) UnmarshalJSON(b []byte) error { @@ -541,7 +541,7 @@ type ExchangeFilter struct { UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExchangeFilter) UnmarshalJSON(b []byte) error { @@ -592,7 +592,7 @@ type ExchangeListing struct { ListingName string `json:"listing_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExchangeListing) UnmarshalJSON(b []byte) error { @@ -625,7 +625,7 @@ type FileInfo struct { UpdatedAt int64 `json:"updated_at,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FileInfo) UnmarshalJSON(b []byte) error { @@ -641,7 +641,7 @@ type FileParent struct { // TODO make the following fields required ParentId string `json:"parent_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FileParent) UnmarshalJSON(b []byte) error { @@ -759,7 +759,7 @@ type GetLatestVersionProviderAnalyticsDashboardResponse struct { // version here is latest logical version of the dashboard template Version int64 `json:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetLatestVersionProviderAnalyticsDashboardResponse) UnmarshalJSON(b []byte) error { @@ -778,7 +778,7 @@ type GetListingContentMetadataRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetListingContentMetadataRequest) UnmarshalJSON(b []byte) error { @@ -794,7 +794,7 @@ type GetListingContentMetadataResponse struct { 
SharedDataObjects []SharedDataObject `json:"shared_data_objects,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetListingContentMetadataResponse) UnmarshalJSON(b []byte) error { @@ -820,7 +820,7 @@ type GetListingsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetListingsRequest) UnmarshalJSON(b []byte) error { @@ -836,7 +836,7 @@ type GetListingsResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetListingsResponse) UnmarshalJSON(b []byte) error { @@ -896,7 +896,7 @@ type InstallationDetail struct { Tokens []TokenInfo `json:"tokens,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InstallationDetail) UnmarshalJSON(b []byte) error { @@ -940,7 +940,7 @@ type ListAllInstallationsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAllInstallationsRequest) UnmarshalJSON(b []byte) error { @@ -956,7 +956,7 @@ type ListAllInstallationsResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAllInstallationsResponse) UnmarshalJSON(b []byte) error { @@ -973,7 +973,7 @@ type ListAllPersonalizationRequestsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAllPersonalizationRequestsRequest) UnmarshalJSON(b []byte) error { @@ -989,7 +989,7 @@ type ListAllPersonalizationRequestsResponse struct { PersonalizationRequests []PersonalizationRequest 
`json:"personalization_requests,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAllPersonalizationRequestsResponse) UnmarshalJSON(b []byte) error { @@ -1008,7 +1008,7 @@ type ListExchangeFiltersRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExchangeFiltersRequest) UnmarshalJSON(b []byte) error { @@ -1024,7 +1024,7 @@ type ListExchangeFiltersResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExchangeFiltersResponse) UnmarshalJSON(b []byte) error { @@ -1043,7 +1043,7 @@ type ListExchangesForListingRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExchangesForListingRequest) UnmarshalJSON(b []byte) error { @@ -1059,7 +1059,7 @@ type ListExchangesForListingResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExchangesForListingResponse) UnmarshalJSON(b []byte) error { @@ -1076,7 +1076,7 @@ type ListExchangesRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExchangesRequest) UnmarshalJSON(b []byte) error { @@ -1092,7 +1092,7 @@ type ListExchangesResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExchangesResponse) UnmarshalJSON(b []byte) error { @@ -1111,7 +1111,7 @@ type ListFilesRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + 
ForceSendFields []string `json:"-" url:"-"` } func (s *ListFilesRequest) UnmarshalJSON(b []byte) error { @@ -1127,7 +1127,7 @@ type ListFilesResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListFilesResponse) UnmarshalJSON(b []byte) error { @@ -1146,7 +1146,7 @@ type ListFulfillmentsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListFulfillmentsRequest) UnmarshalJSON(b []byte) error { @@ -1162,7 +1162,7 @@ type ListFulfillmentsResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListFulfillmentsResponse) UnmarshalJSON(b []byte) error { @@ -1181,7 +1181,7 @@ type ListInstallationsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListInstallationsRequest) UnmarshalJSON(b []byte) error { @@ -1197,7 +1197,7 @@ type ListInstallationsResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListInstallationsResponse) UnmarshalJSON(b []byte) error { @@ -1216,7 +1216,7 @@ type ListListingsForExchangeRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListListingsForExchangeRequest) UnmarshalJSON(b []byte) error { @@ -1232,7 +1232,7 @@ type ListListingsForExchangeResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListListingsForExchangeResponse) UnmarshalJSON(b []byte) 
error { @@ -1264,7 +1264,7 @@ type ListListingsRequest struct { // Matches any of the following tags Tags []ListingTag `json:"-" url:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListListingsRequest) UnmarshalJSON(b []byte) error { @@ -1280,7 +1280,7 @@ type ListListingsResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListListingsResponse) UnmarshalJSON(b []byte) error { @@ -1299,7 +1299,7 @@ type ListProviderAnalyticsDashboardResponse struct { Version int64 `json:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListProviderAnalyticsDashboardResponse) UnmarshalJSON(b []byte) error { @@ -1318,7 +1318,7 @@ type ListProvidersRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListProvidersRequest) UnmarshalJSON(b []byte) error { @@ -1334,7 +1334,7 @@ type ListProvidersResponse struct { Providers []ProviderInfo `json:"providers,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListProvidersResponse) UnmarshalJSON(b []byte) error { @@ -1352,7 +1352,7 @@ type Listing struct { // Next Number: 26 Summary ListingSummary `json:"summary"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Listing) UnmarshalJSON(b []byte) error { @@ -1413,7 +1413,7 @@ type ListingDetail struct { // How often data is updated UpdateFrequency *DataRefreshInfo `json:"update_frequency,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListingDetail) UnmarshalJSON(b []byte) error { @@ -1540,7 +1540,7 @@ type ListingSummary struct { UpdatedById int64 `json:"updated_by_id,omitempty"` - 
ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListingSummary) UnmarshalJSON(b []byte) error { @@ -1674,7 +1674,7 @@ type PersonalizationRequest struct { UpdatedAt int64 `json:"updated_at,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PersonalizationRequest) UnmarshalJSON(b []byte) error { @@ -1749,7 +1749,7 @@ type ProviderInfo struct { TermOfServiceLink string `json:"term_of_service_link"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ProviderInfo) UnmarshalJSON(b []byte) error { @@ -1765,7 +1765,7 @@ type RegionInfo struct { Region string `json:"region,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegionInfo) UnmarshalJSON(b []byte) error { @@ -1817,7 +1817,7 @@ type SearchListingsRequest struct { // Fuzzy matches query Query string `json:"-" url:"query"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchListingsRequest) UnmarshalJSON(b []byte) error { @@ -1833,7 +1833,7 @@ type SearchListingsResponse struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchListingsResponse) UnmarshalJSON(b []byte) error { @@ -1857,7 +1857,7 @@ type SharedDataObject struct { // Name of the shared object Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SharedDataObject) UnmarshalJSON(b []byte) error { @@ -1879,7 +1879,7 @@ type TokenDetail struct { // managed-catalog/api/messages/recipient.proto ShareCredentialsVersion int `json:"shareCredentialsVersion,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenDetail) UnmarshalJSON(b []byte) error { @@ -1907,7 +1907,7 @@ type 
TokenInfo struct { // Username of Recipient Token updater. UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenInfo) UnmarshalJSON(b []byte) error { @@ -1947,7 +1947,7 @@ type UpdateInstallationRequest struct { RotateToken bool `json:"rotate_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateInstallationRequest) UnmarshalJSON(b []byte) error { @@ -1983,7 +1983,7 @@ type UpdatePersonalizationRequestRequest struct { Status PersonalizationRequestStatus `json:"status"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdatePersonalizationRequestRequest) UnmarshalJSON(b []byte) error { @@ -2006,7 +2006,7 @@ type UpdateProviderAnalyticsDashboardRequest struct { // the dashboard template Version int64 `json:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateProviderAnalyticsDashboardRequest) UnmarshalJSON(b []byte) error { @@ -2025,7 +2025,7 @@ type UpdateProviderAnalyticsDashboardResponse struct { Version int64 `json:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateProviderAnalyticsDashboardResponse) UnmarshalJSON(b []byte) error { diff --git a/service/ml/model.go b/service/ml/model.go index bf9d039a4..139a34cc8 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -63,7 +63,7 @@ type Activity struct { // The username of the user that created the object. UserId string `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Activity) UnmarshalJSON(b []byte) error { @@ -191,7 +191,7 @@ type ApproveTransitionRequest struct { // Version of the model. 
Version string `json:"version"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ApproveTransitionRequest) UnmarshalJSON(b []byte) error { @@ -255,7 +255,7 @@ type CommentObject struct { // The username of the user that created the object. UserId string `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CommentObject) UnmarshalJSON(b []byte) error { @@ -293,7 +293,7 @@ type CreateExperiment struct { // guaranteed to support up to 20 tags per request. Tags []ExperimentTag `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateExperiment) UnmarshalJSON(b []byte) error { @@ -308,7 +308,7 @@ type CreateExperimentResponse struct { // Unique identifier for the experiment. ExperimentId string `json:"experiment_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateExperimentResponse) UnmarshalJSON(b []byte) error { @@ -327,7 +327,7 @@ type CreateModelRequest struct { // Additional metadata for registered model. Tags []ModelTag `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateModelRequest) UnmarshalJSON(b []byte) error { @@ -358,7 +358,7 @@ type CreateModelVersionRequest struct { // Additional metadata for model version. Tags []ModelVersionTag `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateModelVersionRequest) UnmarshalJSON(b []byte) error { @@ -427,7 +427,7 @@ type CreateRegistryWebhook struct { // not triggered on a real event. 
Status RegistryWebhookStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateRegistryWebhook) UnmarshalJSON(b []byte) error { @@ -450,7 +450,7 @@ type CreateRun struct { // tag instead. UserId string `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateRun) UnmarshalJSON(b []byte) error { @@ -484,7 +484,7 @@ type CreateTransitionRequest struct { // Version of the model. Version string `json:"version"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateTransitionRequest) UnmarshalJSON(b []byte) error { @@ -526,7 +526,7 @@ type Dataset struct { // MLflow. SourceType string `json:"source_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Dataset) UnmarshalJSON(b []byte) error { @@ -626,7 +626,7 @@ type DeleteRuns struct { // deleted. MaxTimestampMillis int64 `json:"max_timestamp_millis"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteRuns) UnmarshalJSON(b []byte) error { @@ -641,7 +641,7 @@ type DeleteRunsResponse struct { // The number of runs deleted. RunsDeleted int `json:"runs_deleted,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteRunsResponse) UnmarshalJSON(b []byte) error { @@ -685,7 +685,7 @@ type DeleteTransitionRequestRequest struct { // Version of the model. Version string `json:"-" url:"version"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteTransitionRequestRequest) UnmarshalJSON(b []byte) error { @@ -735,7 +735,7 @@ type DeleteWebhookRequest struct { // Webhook ID required to delete a registry webhook. 
Id string `json:"-" url:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteWebhookRequest) UnmarshalJSON(b []byte) error { @@ -766,7 +766,7 @@ type Experiment struct { // Tags: Additional metadata key-value pairs. Tags []ExperimentTag `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Experiment) UnmarshalJSON(b []byte) error { @@ -787,7 +787,7 @@ type ExperimentAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExperimentAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -810,7 +810,7 @@ type ExperimentAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExperimentAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -828,7 +828,7 @@ type ExperimentPermission struct { // Permission level PermissionLevel ExperimentPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExperimentPermission) UnmarshalJSON(b []byte) error { @@ -876,7 +876,7 @@ type ExperimentPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExperimentPermissions) UnmarshalJSON(b []byte) error { @@ -892,7 +892,7 @@ type ExperimentPermissionsDescription struct { // Permission level PermissionLevel ExperimentPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExperimentPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -915,7 +915,7 @@ type ExperimentTag struct { // The tag value. 
Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExperimentTag) UnmarshalJSON(b []byte) error { @@ -934,7 +934,7 @@ type FileInfo struct { // Path relative to the root artifact directory run. Path string `json:"path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FileInfo) UnmarshalJSON(b []byte) error { @@ -995,7 +995,7 @@ type GetHistoryRequest struct { // values. This field will be removed in a future MLflow version. RunUuid string `json:"-" url:"run_uuid,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetHistoryRequest) UnmarshalJSON(b []byte) error { @@ -1027,7 +1027,7 @@ type GetMetricHistoryResponse struct { // results NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetMetricHistoryResponse) UnmarshalJSON(b []byte) error { @@ -1060,7 +1060,7 @@ type GetModelVersionDownloadUriResponse struct { // URI corresponding to where artifacts for this model version are stored. ArtifactUri string `json:"artifact_uri,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetModelVersionDownloadUriResponse) UnmarshalJSON(b []byte) error { @@ -1108,7 +1108,7 @@ type GetRunRequest struct { // be removed in a future MLflow version. RunUuid string `json:"-" url:"run_uuid,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetRunRequest) UnmarshalJSON(b []byte) error { @@ -1146,7 +1146,7 @@ type HttpUrlSpec struct { // External HTTPS URL called on event trigger (by using a POST request). 
Url string `json:"url"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *HttpUrlSpec) UnmarshalJSON(b []byte) error { @@ -1169,7 +1169,7 @@ type HttpUrlSpecWithoutSecret struct { // External HTTPS URL called on event trigger (by using a POST request). Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *HttpUrlSpecWithoutSecret) UnmarshalJSON(b []byte) error { @@ -1186,7 +1186,7 @@ type InputTag struct { // The tag value. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *InputTag) UnmarshalJSON(b []byte) error { @@ -1207,7 +1207,7 @@ type JobSpec struct { // workspace where the webhook is created. WorkspaceUrl string `json:"workspace_url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobSpec) UnmarshalJSON(b []byte) error { @@ -1226,7 +1226,7 @@ type JobSpecWithoutSecret struct { // the job’s workspace is assumed to be the same as the webhook’s. WorkspaceUrl string `json:"workspace_url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *JobSpecWithoutSecret) UnmarshalJSON(b []byte) error { @@ -1255,7 +1255,7 @@ type ListArtifactsRequest struct { // This field will be removed in a future MLflow version. RunUuid string `json:"-" url:"run_uuid,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListArtifactsRequest) UnmarshalJSON(b []byte) error { @@ -1274,7 +1274,7 @@ type ListArtifactsResponse struct { // Root artifact directory for the run. 
RootUri string `json:"root_uri,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListArtifactsResponse) UnmarshalJSON(b []byte) error { @@ -1299,7 +1299,7 @@ type ListExperimentsRequest struct { // only active experiments. ViewType string `json:"-" url:"view_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExperimentsRequest) UnmarshalJSON(b []byte) error { @@ -1318,7 +1318,7 @@ type ListExperimentsResponse struct { // token means no more experiment is available for retrieval. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListExperimentsResponse) UnmarshalJSON(b []byte) error { @@ -1336,7 +1336,7 @@ type ListModelsRequest struct { // Pagination token to go to the next page based on a previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListModelsRequest) UnmarshalJSON(b []byte) error { @@ -1353,7 +1353,7 @@ type ListModelsResponse struct { RegisteredModels []Model `json:"registered_models,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListModelsResponse) UnmarshalJSON(b []byte) error { @@ -1370,7 +1370,7 @@ type ListRegistryWebhooks struct { // Array of registry webhooks. 
Webhooks []RegistryWebhook `json:"webhooks,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListRegistryWebhooks) UnmarshalJSON(b []byte) error { @@ -1406,7 +1406,7 @@ type ListWebhooksRequest struct { // Token indicating the page of artifact results to fetch PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListWebhooksRequest) UnmarshalJSON(b []byte) error { @@ -1430,7 +1430,7 @@ type LogBatch struct { // metrics, params, and tags in total. Tags []RunTag `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogBatch) UnmarshalJSON(b []byte) error { @@ -1450,7 +1450,7 @@ type LogInputs struct { // ID of the run to log under RunId string `json:"run_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogInputs) UnmarshalJSON(b []byte) error { @@ -1479,7 +1479,7 @@ type LogMetric struct { // Double value of the metric being logged. Value float64 `json:"value"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogMetric) UnmarshalJSON(b []byte) error { @@ -1499,7 +1499,7 @@ type LogModel struct { // ID of the run to log under RunId string `json:"run_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogModel) UnmarshalJSON(b []byte) error { @@ -1524,7 +1524,7 @@ type LogParam struct { // String value of the param being logged. Maximum size is 500 bytes. Value string `json:"value"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LogParam) UnmarshalJSON(b []byte) error { @@ -1548,7 +1548,7 @@ type Metric struct { // Value associated with this metric. 
Value float64 `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Metric) UnmarshalJSON(b []byte) error { @@ -1577,7 +1577,7 @@ type Model struct { // User that created this `registered_model` UserId string `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Model) UnmarshalJSON(b []byte) error { @@ -1609,7 +1609,7 @@ type ModelDatabricks struct { // The username of the user that created the object. UserId string `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ModelDatabricks) UnmarshalJSON(b []byte) error { @@ -1626,7 +1626,7 @@ type ModelTag struct { // The tag value. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ModelTag) UnmarshalJSON(b []byte) error { @@ -1668,7 +1668,7 @@ type ModelVersion struct { // Model's version number. Version string `json:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ModelVersion) UnmarshalJSON(b []byte) error { @@ -1729,7 +1729,7 @@ type ModelVersionDatabricks struct { // Version of the model. Version string `json:"version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ModelVersionDatabricks) UnmarshalJSON(b []byte) error { @@ -1776,7 +1776,7 @@ type ModelVersionTag struct { // The tag value. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ModelVersionTag) UnmarshalJSON(b []byte) error { @@ -1793,7 +1793,7 @@ type Param struct { // Value associated with this param. 
Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Param) UnmarshalJSON(b []byte) error { @@ -1849,7 +1849,7 @@ type RegisteredModelAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegisteredModelAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -1872,7 +1872,7 @@ type RegisteredModelAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegisteredModelAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -1890,7 +1890,7 @@ type RegisteredModelPermission struct { // Permission level PermissionLevel RegisteredModelPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegisteredModelPermission) UnmarshalJSON(b []byte) error { @@ -1942,7 +1942,7 @@ type RegisteredModelPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegisteredModelPermissions) UnmarshalJSON(b []byte) error { @@ -1958,7 +1958,7 @@ type RegisteredModelPermissionsDescription struct { // Permission level PermissionLevel RegisteredModelPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegisteredModelPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -2034,7 +2034,7 @@ type RegistryWebhook struct { // not triggered on a real event. 
Status RegistryWebhookStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RegistryWebhook) UnmarshalJSON(b []byte) error { @@ -2151,7 +2151,7 @@ type RejectTransitionRequest struct { // Version of the model. Version string `json:"version"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RejectTransitionRequest) UnmarshalJSON(b []byte) error { @@ -2173,7 +2173,7 @@ type RenameModelRequest struct { // If provided, updates the name for this `registered_model`. NewName string `json:"new_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RenameModelRequest) UnmarshalJSON(b []byte) error { @@ -2215,7 +2215,7 @@ type RestoreRuns struct { // restored. MinTimestampMillis int64 `json:"min_timestamp_millis"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RestoreRuns) UnmarshalJSON(b []byte) error { @@ -2230,7 +2230,7 @@ type RestoreRunsResponse struct { // The number of runs restored. RunsRestored int `json:"runs_restored,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RestoreRunsResponse) UnmarshalJSON(b []byte) error { @@ -2285,7 +2285,7 @@ type RunInfo struct { // instead. UserId string `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunInfo) UnmarshalJSON(b []byte) error { @@ -2341,7 +2341,7 @@ type RunTag struct { // The tag value. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunTag) UnmarshalJSON(b []byte) error { @@ -2369,7 +2369,7 @@ type SearchExperiments struct { // only active experiments. 
ViewType SearchExperimentsViewType `json:"view_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchExperiments) UnmarshalJSON(b []byte) error { @@ -2387,7 +2387,7 @@ type SearchExperimentsResponse struct { // token means that no more experiments are available for retrieval. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchExperimentsResponse) UnmarshalJSON(b []byte) error { @@ -2444,7 +2444,7 @@ type SearchModelVersionsRequest struct { // Pagination token to go to next page based on previous search query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchModelVersionsRequest) UnmarshalJSON(b []byte) error { @@ -2462,7 +2462,7 @@ type SearchModelVersionsResponse struct { // query. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchModelVersionsResponse) UnmarshalJSON(b []byte) error { @@ -2488,7 +2488,7 @@ type SearchModelsRequest struct { // Pagination token to go to the next page based on a previous search query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchModelsRequest) UnmarshalJSON(b []byte) error { @@ -2505,7 +2505,7 @@ type SearchModelsResponse struct { // Registered Models that match the search criteria. RegisteredModels []Model `json:"registered_models,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchModelsResponse) UnmarshalJSON(b []byte) error { @@ -2547,7 +2547,7 @@ type SearchRuns struct { // only active runs. 
RunViewType SearchRunsRunViewType `json:"run_view_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchRuns) UnmarshalJSON(b []byte) error { @@ -2564,7 +2564,7 @@ type SearchRunsResponse struct { // Runs that match the search criteria. Runs []Run `json:"runs,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SearchRunsResponse) UnmarshalJSON(b []byte) error { @@ -2671,7 +2671,7 @@ type SetTag struct { // 5000 bytes in size. Value string `json:"value"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SetTag) UnmarshalJSON(b []byte) error { @@ -2776,7 +2776,7 @@ type TestRegistryWebhook struct { // Status code returned by the webhook URL StatusCode int `json:"status_code,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TestRegistryWebhook) UnmarshalJSON(b []byte) error { @@ -2822,7 +2822,7 @@ type TransitionModelVersionStageDatabricks struct { // Version of the model. Version string `json:"version"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TransitionModelVersionStageDatabricks) UnmarshalJSON(b []byte) error { @@ -2855,7 +2855,7 @@ type TransitionRequest struct { // The username of the user that created the object. UserId string `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TransitionRequest) UnmarshalJSON(b []byte) error { @@ -2889,7 +2889,7 @@ type UpdateExperiment struct { // name must be unique. NewName string `json:"new_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateExperiment) UnmarshalJSON(b []byte) error { @@ -2909,7 +2909,7 @@ type UpdateModelRequest struct { // Registered model unique name identifier. 
Name string `json:"name"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateModelRequest) UnmarshalJSON(b []byte) error { @@ -2931,7 +2931,7 @@ type UpdateModelVersionRequest struct { // Model version number Version string `json:"version"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateModelVersionRequest) UnmarshalJSON(b []byte) error { @@ -2998,7 +2998,7 @@ type UpdateRegistryWebhook struct { // not triggered on a real event. Status RegistryWebhookStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateRegistryWebhook) UnmarshalJSON(b []byte) error { @@ -3020,7 +3020,7 @@ type UpdateRun struct { // Updated status of the run. Status UpdateRunStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateRun) UnmarshalJSON(b []byte) error { diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 1fa578111..9b6eb340c 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -14,7 +14,7 @@ type CreateAccountFederationPolicyRequest struct { // unspecified, the id will be assigned by Databricks. PolicyId string `json:"-" url:"policy_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateAccountFederationPolicyRequest) UnmarshalJSON(b []byte) error { @@ -43,7 +43,7 @@ type CreateCustomAppIntegration struct { // minted. Must be a subset of scopes. 
UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCustomAppIntegration) UnmarshalJSON(b []byte) error { @@ -63,7 +63,7 @@ type CreateCustomAppIntegrationOutput struct { // Unique integration id for the custom OAuth app IntegrationId string `json:"integration_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCustomAppIntegrationOutput) UnmarshalJSON(b []byte) error { @@ -81,7 +81,7 @@ type CreatePublishedAppIntegration struct { // Token access policy TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePublishedAppIntegration) UnmarshalJSON(b []byte) error { @@ -96,7 +96,7 @@ type CreatePublishedAppIntegrationOutput struct { // Unique integration id for the published OAuth app IntegrationId string `json:"integration_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePublishedAppIntegrationOutput) UnmarshalJSON(b []byte) error { @@ -117,7 +117,7 @@ type CreateServicePrincipalFederationPolicyRequest struct { // The service principal id for the federation policy. ServicePrincipalId int64 `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateServicePrincipalFederationPolicyRequest) UnmarshalJSON(b []byte) error { @@ -148,7 +148,7 @@ type CreateServicePrincipalSecretResponse struct { // UTC time when the secret was updated UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateServicePrincipalSecretResponse) UnmarshalJSON(b []byte) error { @@ -221,7 +221,7 @@ type FederationPolicy struct { // Last update time of the federation policy. 
UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FederationPolicy) UnmarshalJSON(b []byte) error { @@ -265,7 +265,7 @@ type GetCustomAppIntegrationOutput struct { // minted. Must be a subset of scopes. UserAuthorizedScopes []string `json:"user_authorized_scopes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetCustomAppIntegrationOutput) UnmarshalJSON(b []byte) error { @@ -288,7 +288,7 @@ type GetCustomAppIntegrationsOutput struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetCustomAppIntegrationsOutput) UnmarshalJSON(b []byte) error { @@ -313,7 +313,7 @@ type GetPublishedAppIntegrationOutput struct { // Token access policy TokenAccessPolicy *TokenAccessPolicy `json:"token_access_policy,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetPublishedAppIntegrationOutput) UnmarshalJSON(b []byte) error { @@ -335,7 +335,7 @@ type GetPublishedAppIntegrationsOutput struct { NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetPublishedAppIntegrationsOutput) UnmarshalJSON(b []byte) error { @@ -353,7 +353,7 @@ type GetPublishedAppsOutput struct { // there are no more results to show. 
NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetPublishedAppsOutput) UnmarshalJSON(b []byte) error { @@ -378,7 +378,7 @@ type ListAccountFederationPoliciesRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAccountFederationPoliciesRequest) UnmarshalJSON(b []byte) error { @@ -397,7 +397,7 @@ type ListCustomAppIntegrationsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListCustomAppIntegrationsRequest) UnmarshalJSON(b []byte) error { @@ -413,7 +413,7 @@ type ListFederationPoliciesResponse struct { Policies []FederationPolicy `json:"policies,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListFederationPoliciesResponse) UnmarshalJSON(b []byte) error { @@ -431,7 +431,7 @@ type ListOAuthPublishedAppsRequest struct { // A token that can be used to get the next page of results. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListOAuthPublishedAppsRequest) UnmarshalJSON(b []byte) error { @@ -448,7 +448,7 @@ type ListPublishedAppIntegrationsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPublishedAppIntegrationsRequest) UnmarshalJSON(b []byte) error { @@ -467,7 +467,7 @@ type ListServicePrincipalFederationPoliciesRequest struct { // The service principal id for the federation policy. 
ServicePrincipalId int64 `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListServicePrincipalFederationPoliciesRequest) UnmarshalJSON(b []byte) error { @@ -493,7 +493,7 @@ type ListServicePrincipalSecretsRequest struct { // The service principal ID. ServicePrincipalId int64 `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListServicePrincipalSecretsRequest) UnmarshalJSON(b []byte) error { @@ -510,7 +510,7 @@ type ListServicePrincipalSecretsResponse struct { // List of the secrets Secrets []SecretInfo `json:"secrets,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListServicePrincipalSecretsResponse) UnmarshalJSON(b []byte) error { @@ -548,7 +548,7 @@ type OidcFederationPolicy struct { // default value is 'sub'. SubjectClaim string `json:"subject_claim,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *OidcFederationPolicy) UnmarshalJSON(b []byte) error { @@ -577,7 +577,7 @@ type PublishedAppOutput struct { // Required scopes for the published OAuth app. 
Scopes []string `json:"scopes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PublishedAppOutput) UnmarshalJSON(b []byte) error { @@ -600,7 +600,7 @@ type SecretInfo struct { // UTC time when the secret was updated UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SecretInfo) UnmarshalJSON(b []byte) error { @@ -617,7 +617,7 @@ type TokenAccessPolicy struct { // refresh token time to live in minutes RefreshTokenTtlInMinutes int `json:"refresh_token_ttl_in_minutes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenAccessPolicy) UnmarshalJSON(b []byte) error { @@ -641,7 +641,7 @@ type UpdateAccountFederationPolicyRequest struct { // the existing policy. Example value: 'description,oidc_policy.audiences'. UpdateMask string `json:"-" url:"update_mask,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateAccountFederationPolicyRequest) UnmarshalJSON(b []byte) error { @@ -696,7 +696,7 @@ type UpdateServicePrincipalFederationPolicyRequest struct { // the existing policy. Example value: 'description,oidc_policy.audiences'. UpdateMask string `json:"-" url:"update_mask,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateServicePrincipalFederationPolicyRequest) UnmarshalJSON(b []byte) error { diff --git a/service/pipelines/model.go b/service/pipelines/model.go index c3df70125..3cbab434b 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -78,7 +78,7 @@ type CreatePipeline struct { // Which pipeline trigger to use. Deprecated: Use `continuous` instead. 
Trigger *PipelineTrigger `json:"trigger,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePipeline) UnmarshalJSON(b []byte) error { @@ -96,7 +96,7 @@ type CreatePipelineResponse struct { // dry_run is false. PipelineId string `json:"pipeline_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreatePipelineResponse) UnmarshalJSON(b []byte) error { @@ -112,7 +112,7 @@ type CronTrigger struct { TimezoneId string `json:"timezone_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CronTrigger) UnmarshalJSON(b []byte) error { @@ -129,7 +129,7 @@ type DataPlaneId struct { // A sequence number, unique and increasing within the data plane instance. SeqNo int `json:"seq_no,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DataPlaneId) UnmarshalJSON(b []byte) error { @@ -288,7 +288,7 @@ type EditPipeline struct { // Which pipeline trigger to use. Deprecated: Use `continuous` instead. Trigger *PipelineTrigger `json:"trigger,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EditPipeline) UnmarshalJSON(b []byte) error { @@ -308,7 +308,7 @@ type ErrorDetail struct { // Whether this error is considered fatal, that is, unrecoverable. Fatal bool `json:"fatal,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ErrorDetail) UnmarshalJSON(b []byte) error { @@ -355,7 +355,7 @@ type FileLibrary struct { // The absolute path of the file. Path string `json:"path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FileLibrary) UnmarshalJSON(b []byte) error { @@ -423,7 +423,7 @@ type GetPipelineResponse struct { // The pipeline state. 
State PipelineState `json:"state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetPipelineResponse) UnmarshalJSON(b []byte) error { @@ -504,7 +504,7 @@ type IngestionGatewayPipelineDefinition struct { // storage location. GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *IngestionGatewayPipelineDefinition) UnmarshalJSON(b []byte) error { @@ -531,7 +531,7 @@ type IngestionPipelineDefinition struct { // are applied to all tables in the pipeline. TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *IngestionPipelineDefinition) UnmarshalJSON(b []byte) error { @@ -568,7 +568,7 @@ type ListPipelineEventsRequest struct { PipelineId string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPipelineEventsRequest) UnmarshalJSON(b []byte) error { @@ -587,7 +587,7 @@ type ListPipelineEventsResponse struct { // If present, a token to fetch the previous page of events. PrevPageToken string `json:"prev_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPipelineEventsResponse) UnmarshalJSON(b []byte) error { @@ -622,7 +622,7 @@ type ListPipelinesRequest struct { // Page token returned by previous call PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPipelinesRequest) UnmarshalJSON(b []byte) error { @@ -639,7 +639,7 @@ type ListPipelinesResponse struct { // The list of events matching the request criteria. 
Statuses []PipelineStateInfo `json:"statuses,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPipelinesResponse) UnmarshalJSON(b []byte) error { @@ -661,7 +661,7 @@ type ListUpdatesRequest struct { // If present, returns updates until and including this update_id. UntilUpdateId string `json:"-" url:"until_update_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListUpdatesRequest) UnmarshalJSON(b []byte) error { @@ -682,7 +682,7 @@ type ListUpdatesResponse struct { Updates []UpdateInfo `json:"updates,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListUpdatesResponse) UnmarshalJSON(b []byte) error { @@ -730,7 +730,7 @@ type NotebookLibrary struct { // The absolute path of the notebook. Path string `json:"path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NotebookLibrary) UnmarshalJSON(b []byte) error { @@ -791,7 +791,7 @@ type Origin struct { // The id of an execution. Globally unique. UpdateId string `json:"update_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Origin) UnmarshalJSON(b []byte) error { @@ -812,7 +812,7 @@ type PipelineAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -835,7 +835,7 @@ type PipelineAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -942,7 +942,7 @@ type PipelineCluster struct { // user name `ubuntu` on port `2200`. 
Up to 10 keys can be specified. SshPublicKeys []string `json:"ssh_public_keys,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineCluster) UnmarshalJSON(b []byte) error { @@ -1007,7 +1007,7 @@ type PipelineDeployment struct { // The path to the file containing metadata about the deployment. MetadataFilePath string `json:"metadata_file_path,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineDeployment) UnmarshalJSON(b []byte) error { @@ -1038,7 +1038,7 @@ type PipelineEvent struct { // The time of the event. Timestamp string `json:"timestamp,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineEvent) UnmarshalJSON(b []byte) error { @@ -1063,7 +1063,7 @@ type PipelineLibrary struct { // URI of the whl to be installed. Whl string `json:"whl,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineLibrary) UnmarshalJSON(b []byte) error { @@ -1081,7 +1081,7 @@ type PipelinePermission struct { // Permission level PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelinePermission) UnmarshalJSON(b []byte) error { @@ -1131,7 +1131,7 @@ type PipelinePermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelinePermissions) UnmarshalJSON(b []byte) error { @@ -1147,7 +1147,7 @@ type PipelinePermissionsDescription struct { // Permission level PermissionLevel PipelinePermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelinePermissionsDescription) UnmarshalJSON(b []byte) error { @@ -1221,7 +1221,7 @@ type 
PipelineSpec struct { // Which pipeline trigger to use. Deprecated: Use `continuous` instead. Trigger *PipelineTrigger `json:"trigger,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineSpec) UnmarshalJSON(b []byte) error { @@ -1294,7 +1294,7 @@ type PipelineStateInfo struct { // The pipeline state. State PipelineState `json:"state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PipelineStateInfo) UnmarshalJSON(b []byte) error { @@ -1354,7 +1354,7 @@ type ReportSpec struct { // IngestionPipelineDefinition object. TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ReportSpec) UnmarshalJSON(b []byte) error { @@ -1379,7 +1379,7 @@ type RestartWindow struct { // for details. If not specified, UTC will be used. TimeZoneId string `json:"time_zone_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RestartWindow) UnmarshalJSON(b []byte) error { @@ -1404,7 +1404,7 @@ type RunAs struct { // their own email. UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RunAs) UnmarshalJSON(b []byte) error { @@ -1432,7 +1432,7 @@ type SchemaSpec struct { // table_configuration defined in the IngestionPipelineDefinition object. TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SchemaSpec) UnmarshalJSON(b []byte) error { @@ -1449,7 +1449,7 @@ type Sequencing struct { // the ID assigned by the data plane. 
DataPlaneId *DataPlaneId `json:"data_plane_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Sequencing) UnmarshalJSON(b []byte) error { @@ -1468,7 +1468,7 @@ type SerializedException struct { // Stack trace consisting of a list of stack frames Stack []StackFrame `json:"stack,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SerializedException) UnmarshalJSON(b []byte) error { @@ -1489,7 +1489,7 @@ type StackFrame struct { // Name of the method which was called MethodName string `json:"method_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StackFrame) UnmarshalJSON(b []byte) error { @@ -1520,7 +1520,7 @@ type StartUpdate struct { // code but does not materialize or publish any datasets. ValidateOnly bool `json:"validate_only,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StartUpdate) UnmarshalJSON(b []byte) error { @@ -1569,7 +1569,7 @@ func (f *StartUpdateCause) Type() string { type StartUpdateResponse struct { UpdateId string `json:"update_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StartUpdateResponse) UnmarshalJSON(b []byte) error { @@ -1608,7 +1608,7 @@ type TableSpec struct { // IngestionPipelineDefinition object and the SchemaSpec. TableConfiguration *TableSpecificConfig `json:"table_configuration,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TableSpec) UnmarshalJSON(b []byte) error { @@ -1632,7 +1632,7 @@ type TableSpecificConfig struct { // arrive out of order. 
SequenceBy []string `json:"sequence_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TableSpecificConfig) UnmarshalJSON(b []byte) error { @@ -1703,7 +1703,7 @@ type UpdateInfo struct { // code but does not materialize or publish any datasets. ValidateOnly bool `json:"validate_only,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateInfo) UnmarshalJSON(b []byte) error { @@ -1803,7 +1803,7 @@ type UpdateStateInfo struct { UpdateId string `json:"update_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateStateInfo) UnmarshalJSON(b []byte) error { diff --git a/service/pkg.go b/service/pkg.go index 0bd4cac58..d42c20d64 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -337,8 +337,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *provisioning.CredentialsAPI = nil _ *catalog.CredentialsAPI = nil + _ *provisioning.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil diff --git a/service/provisioning/model.go b/service/provisioning/model.go index ddaa35c85..af68dac4a 100755 --- a/service/provisioning/model.go +++ b/service/provisioning/model.go @@ -25,7 +25,7 @@ type AwsKeyInfo struct { // EBS volumes, set to `false`. 
ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AwsKeyInfo) UnmarshalJSON(b []byte) error { @@ -42,7 +42,7 @@ type AzureWorkspaceInfo struct { // Azure Subscription ID SubscriptionId string `json:"subscription_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AzureWorkspaceInfo) UnmarshalJSON(b []byte) error { @@ -71,7 +71,7 @@ type CreateAwsKeyInfo struct { // set this to `false`. ReuseKeyForClusterVolumes bool `json:"reuse_key_for_cluster_volumes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateAwsKeyInfo) UnmarshalJSON(b []byte) error { @@ -96,7 +96,7 @@ type CreateCredentialStsRole struct { // The Amazon Resource Name (ARN) of the cross account role. RoleArn string `json:"role_arn,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCredentialStsRole) UnmarshalJSON(b []byte) error { @@ -141,7 +141,7 @@ type CreateNetworkRequest struct { // multiple network configurations. VpcId string `json:"vpc_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateNetworkRequest) UnmarshalJSON(b []byte) error { @@ -170,7 +170,7 @@ type CreateVpcEndpointRequest struct { // The human-readable name of the storage configuration. VpcEndpointName string `json:"vpc_endpoint_name"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateVpcEndpointRequest) UnmarshalJSON(b []byte) error { @@ -293,7 +293,7 @@ type CreateWorkspaceRequest struct { // The workspace's human-readable name. 
WorkspaceName string `json:"workspace_name"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateWorkspaceRequest) UnmarshalJSON(b []byte) error { @@ -316,7 +316,7 @@ type Credential struct { // The human-readable name of the credential configuration object. CredentialsName string `json:"credentials_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Credential) UnmarshalJSON(b []byte) error { @@ -339,7 +339,7 @@ type CustomerFacingGcpCloudResourceContainer struct { // cloud resources for your workspace. ProjectId string `json:"project_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CustomerFacingGcpCloudResourceContainer) UnmarshalJSON(b []byte) error { @@ -364,7 +364,7 @@ type CustomerManagedKey struct { // The cases that the key can be used for. UseCases []KeyUseCase `json:"use_cases,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CustomerManagedKey) UnmarshalJSON(b []byte) error { @@ -494,7 +494,7 @@ type ExternalCustomerInfo struct { // The legal entity name for the external workspace CustomerName string `json:"customer_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExternalCustomerInfo) UnmarshalJSON(b []byte) error { @@ -544,7 +544,7 @@ type GcpManagedNetworkConfig struct { // `/9` and no smaller than `/29`. SubnetCidr string `json:"subnet_cidr,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GcpManagedNetworkConfig) UnmarshalJSON(b []byte) error { @@ -593,7 +593,7 @@ type GcpVpcEndpointInfo struct { // The service attachment this PSC connection connects to. 
ServiceAttachmentId string `json:"service_attachment_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GcpVpcEndpointInfo) UnmarshalJSON(b []byte) error { @@ -663,7 +663,7 @@ type GkeConfig struct { // It must be exactly as big as `/28`. MasterIpRange string `json:"master_ip_range,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GkeConfig) UnmarshalJSON(b []byte) error { @@ -777,7 +777,7 @@ type Network struct { // Workspace ID associated with this network configuration. WorkspaceId int64 `json:"workspace_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Network) UnmarshalJSON(b []byte) error { @@ -795,7 +795,7 @@ type NetworkHealth struct { // security group, or network ACL. ErrorType ErrorType `json:"error_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NetworkHealth) UnmarshalJSON(b []byte) error { @@ -826,7 +826,7 @@ type NetworkWarning struct { // group. WarningType WarningType `json:"warning_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NetworkWarning) UnmarshalJSON(b []byte) error { @@ -935,7 +935,7 @@ type PrivateAccessSettings struct { // object. Region string `json:"region,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PrivateAccessSettings) UnmarshalJSON(b []byte) error { @@ -954,7 +954,7 @@ type RootBucketInfo struct { // The name of the S3 bucket. BucketName string `json:"bucket_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RootBucketInfo) UnmarshalJSON(b []byte) error { @@ -977,7 +977,7 @@ type StorageConfiguration struct { // The human-readable name of the storage configuration. 
StorageConfigurationName string `json:"storage_configuration_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StorageConfiguration) UnmarshalJSON(b []byte) error { @@ -995,7 +995,7 @@ type StsRole struct { // The Amazon Resource Name (ARN) of the cross account role. RoleArn string `json:"role_arn,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StsRole) UnmarshalJSON(b []byte) error { @@ -1043,7 +1043,7 @@ type UpdateWorkspaceRequest struct { // Workspace ID. WorkspaceId int64 `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateWorkspaceRequest) UnmarshalJSON(b []byte) error { @@ -1091,7 +1091,7 @@ type UpsertPrivateAccessSettingsRequest struct { // settings object. Region string `json:"region"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpsertPrivateAccessSettingsRequest) UnmarshalJSON(b []byte) error { @@ -1139,7 +1139,7 @@ type VpcEndpoint struct { // The human-readable name of the storage configuration. VpcEndpointName string `json:"vpc_endpoint_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *VpcEndpoint) UnmarshalJSON(b []byte) error { @@ -1315,7 +1315,7 @@ type Workspace struct { // Message describing the current workspace status. WorkspaceStatusMessage string `json:"workspace_status_message,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Workspace) UnmarshalJSON(b []byte) error { diff --git a/service/serving/model.go b/service/serving/model.go index 749c6a71b..3087942d7 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -21,7 +21,7 @@ type Ai21LabsConfig struct { // `ai21labs_api_key` or `ai21labs_api_key_plaintext`. 
Ai21labsApiKeyPlaintext string `json:"ai21labs_api_key_plaintext,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Ai21LabsConfig) UnmarshalJSON(b []byte) error { @@ -60,7 +60,7 @@ type AiGatewayGuardrailParameters struct { // the request if its topic is not in the allowed topics. ValidTopics []string `json:"valid_topics,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AiGatewayGuardrailParameters) UnmarshalJSON(b []byte) error { @@ -125,7 +125,7 @@ type AiGatewayInferenceTableConfig struct { // disable inference table first in order to change the prefix name. TableNamePrefix string `json:"table_name_prefix,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AiGatewayInferenceTableConfig) UnmarshalJSON(b []byte) error { @@ -204,7 +204,7 @@ type AiGatewayUsageTrackingConfig struct { // Whether to enable usage tracking. Enabled bool `json:"enabled,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AiGatewayUsageTrackingConfig) UnmarshalJSON(b []byte) error { @@ -248,7 +248,7 @@ type AmazonBedrockConfig struct { // insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. BedrockProvider AmazonBedrockConfigBedrockProvider `json:"bedrock_provider"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AmazonBedrockConfig) UnmarshalJSON(b []byte) error { @@ -302,7 +302,7 @@ type AnthropicConfig struct { // `anthropic_api_key` or `anthropic_api_key_plaintext`. AnthropicApiKeyPlaintext string `json:"anthropic_api_key_plaintext,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AnthropicConfig) UnmarshalJSON(b []byte) error { @@ -326,7 +326,7 @@ type AutoCaptureConfigInput struct { // change the prefix name if the inference table is already enabled. 
TableNamePrefix string `json:"table_name_prefix,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AutoCaptureConfigInput) UnmarshalJSON(b []byte) error { @@ -352,7 +352,7 @@ type AutoCaptureConfigOutput struct { // change the prefix name if the inference table is already enabled. TableNamePrefix string `json:"table_name_prefix,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AutoCaptureConfigOutput) UnmarshalJSON(b []byte) error { @@ -388,7 +388,7 @@ type ChatMessage struct { // The role of the message. One of [system, user, assistant]. Role ChatMessageRole `json:"role,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ChatMessage) UnmarshalJSON(b []byte) error { @@ -444,7 +444,7 @@ type CohereConfig struct { // `cohere_api_key` or `cohere_api_key_plaintext`. CohereApiKeyPlaintext string `json:"cohere_api_key_plaintext,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CohereConfig) UnmarshalJSON(b []byte) error { @@ -475,7 +475,7 @@ type CreateServingEndpoint struct { // to billing logs. Tags []EndpointTag `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateServingEndpoint) UnmarshalJSON(b []byte) error { @@ -493,7 +493,7 @@ type DataPlaneInfo struct { // The URL of the endpoint for this operation in the dataplane. EndpointUrl string `json:"endpoint_url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DataPlaneInfo) UnmarshalJSON(b []byte) error { @@ -523,7 +523,7 @@ type DatabricksModelServingConfig struct { // pointed to by this external model. 
DatabricksWorkspaceUrl string `json:"databricks_workspace_url"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DatabricksModelServingConfig) UnmarshalJSON(b []byte) error { @@ -557,7 +557,7 @@ type EmbeddingsV1ResponseEmbeddingElement struct { // This will always be 'embedding'. Object EmbeddingsV1ResponseEmbeddingElementObject `json:"object,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EmbeddingsV1ResponseEmbeddingElement) UnmarshalJSON(b []byte) error { @@ -629,7 +629,7 @@ type EndpointCoreConfigOutput struct { // The traffic configuration associated with the serving endpoint config. TrafficConfig *TrafficConfig `json:"traffic_config,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointCoreConfigOutput) UnmarshalJSON(b []byte) error { @@ -669,7 +669,7 @@ type EndpointPendingConfig struct { // should be routed. TrafficConfig *TrafficConfig `json:"traffic_config,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointPendingConfig) UnmarshalJSON(b []byte) error { @@ -758,7 +758,7 @@ type EndpointTag struct { // Optional value field for a serving endpoint tag. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointTag) UnmarshalJSON(b []byte) error { @@ -801,7 +801,7 @@ type ExternalFunctionRequest struct { // The relative path for the API endpoint. This is required. Path string `json:"path"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExternalFunctionRequest) UnmarshalJSON(b []byte) error { @@ -922,7 +922,7 @@ type ExternalModelUsageElement struct { // The total number of tokens in the prompt and response. 
TotalTokens int `json:"total_tokens,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExternalModelUsageElement) UnmarshalJSON(b []byte) error { @@ -944,7 +944,7 @@ type FoundationModel struct { Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *FoundationModel) UnmarshalJSON(b []byte) error { @@ -1021,7 +1021,7 @@ type GoogleCloudVertexAiConfig struct { // https://cloud.google.com/vertex-ai/docs/general/locations Region string `json:"region"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GoogleCloudVertexAiConfig) UnmarshalJSON(b []byte) error { @@ -1112,7 +1112,7 @@ type OpenAiConfig struct { // OpenAI. OpenaiOrganization string `json:"openai_organization,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *OpenAiConfig) UnmarshalJSON(b []byte) error { @@ -1135,7 +1135,7 @@ type PaLmConfig struct { // `palm_api_key_plaintext`. PalmApiKeyPlaintext string `json:"palm_api_key_plaintext,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PaLmConfig) UnmarshalJSON(b []byte) error { @@ -1163,7 +1163,7 @@ type PayloadTable struct { StatusMessage string `json:"status_message,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PayloadTable) UnmarshalJSON(b []byte) error { @@ -1273,7 +1273,7 @@ type QueryEndpointInput struct { // query fields. Temperature float64 `json:"temperature,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryEndpointInput) UnmarshalJSON(b []byte) error { @@ -1314,7 +1314,7 @@ type QueryEndpointResponse struct { // tokens used in the prompt and response. 
Usage *ExternalModelUsageElement `json:"usage,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryEndpointResponse) UnmarshalJSON(b []byte) error { @@ -1487,7 +1487,7 @@ type ServedEntityInput struct { // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServedEntityInput) UnmarshalJSON(b []byte) error { @@ -1564,7 +1564,7 @@ type ServedEntityOutput struct { // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServedEntityOutput) UnmarshalJSON(b []byte) error { @@ -1587,7 +1587,7 @@ type ServedEntitySpec struct { Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServedEntitySpec) UnmarshalJSON(b []byte) error { @@ -1643,7 +1643,7 @@ type ServedModelInput struct { // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types WorkloadType ServedModelInputWorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServedModelInput) UnmarshalJSON(b []byte) error { @@ -1762,7 +1762,7 @@ type ServedModelOutput struct { // [GPU types]: https://docs.databricks.com/en/machine-learning/model-serving/create-manage-serving-endpoints.html#gpu-workload-types WorkloadType ServingModelWorkloadType `json:"workload_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" 
url:"-"` } func (s *ServedModelOutput) UnmarshalJSON(b []byte) error { @@ -1781,7 +1781,7 @@ type ServedModelSpec struct { Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServedModelSpec) UnmarshalJSON(b []byte) error { @@ -1797,7 +1797,7 @@ type ServedModelState struct { DeploymentStateMessage string `json:"deployment_state_message,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServedModelState) UnmarshalJSON(b []byte) error { @@ -1872,7 +1872,7 @@ type ServingEndpoint struct { // The task type of the serving endpoint. Task string `json:"task,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServingEndpoint) UnmarshalJSON(b []byte) error { @@ -1893,7 +1893,7 @@ type ServingEndpointAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServingEndpointAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -1916,7 +1916,7 @@ type ServingEndpointAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServingEndpointAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -1963,7 +1963,7 @@ type ServingEndpointDetailed struct { // The task type of the serving endpoint. 
Task string `json:"task,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServingEndpointDetailed) UnmarshalJSON(b []byte) error { @@ -2010,7 +2010,7 @@ type ServingEndpointPermission struct { // Permission level PermissionLevel ServingEndpointPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServingEndpointPermission) UnmarshalJSON(b []byte) error { @@ -2058,7 +2058,7 @@ type ServingEndpointPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServingEndpointPermissions) UnmarshalJSON(b []byte) error { @@ -2074,7 +2074,7 @@ type ServingEndpointPermissionsDescription struct { // Permission level PermissionLevel ServingEndpointPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServingEndpointPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -2141,7 +2141,7 @@ type V1ResponseChoiceElement struct { // The text response from the __completions__ endpoint. Text string `json:"text,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *V1ResponseChoiceElement) UnmarshalJSON(b []byte) error { diff --git a/service/settings/model.go b/service/settings/model.go index c2dded259..b4ec75db2 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -25,7 +25,7 @@ type AccountIpAccessEnable struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AccountIpAccessEnable) UnmarshalJSON(b []byte) error { @@ -86,7 +86,7 @@ type AibiDashboardEmbeddingAccessPolicySetting struct { // instance per workspace. 
SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AibiDashboardEmbeddingAccessPolicySetting) UnmarshalJSON(b []byte) error { @@ -118,7 +118,7 @@ type AibiDashboardEmbeddingApprovedDomainsSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AibiDashboardEmbeddingApprovedDomainsSetting) UnmarshalJSON(b []byte) error { @@ -146,7 +146,7 @@ type AutomaticClusterUpdateSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AutomaticClusterUpdateSetting) UnmarshalJSON(b []byte) error { @@ -160,7 +160,7 @@ func (s AutomaticClusterUpdateSetting) MarshalJSON() ([]byte, error) { type BooleanMessage struct { Value bool `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *BooleanMessage) UnmarshalJSON(b []byte) error { @@ -187,7 +187,7 @@ type ClusterAutoRestartMessage struct { RestartEvenIfNoUpdatesAvailable bool `json:"restart_even_if_no_updates_available,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterAutoRestartMessage) UnmarshalJSON(b []byte) error { @@ -213,7 +213,7 @@ type ClusterAutoRestartMessageEnablementDetails struct { // The feature is unavailable if the customer doesn't have enterprise tier UnavailableForNonEnterpriseTier bool `json:"unavailable_for_non_enterprise_tier,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterAutoRestartMessageEnablementDetails) UnmarshalJSON(b []byte) error { @@ -315,7 +315,7 @@ type ClusterAutoRestartMessageMaintenanceWindowWindowStartTime struct { Minutes int 
`json:"minutes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClusterAutoRestartMessageMaintenanceWindowWindowStartTime) UnmarshalJSON(b []byte) error { @@ -333,7 +333,7 @@ type ComplianceSecurityProfile struct { IsEnabled bool `json:"is_enabled,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ComplianceSecurityProfile) UnmarshalJSON(b []byte) error { @@ -362,7 +362,7 @@ type ComplianceSecurityProfileSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ComplianceSecurityProfileSetting) UnmarshalJSON(b []byte) error { @@ -472,7 +472,7 @@ type CreateNotificationDestinationRequest struct { // The display name for the notification destination. DisplayName string `json:"display_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateNotificationDestinationRequest) UnmarshalJSON(b []byte) error { @@ -492,7 +492,7 @@ type CreateOboTokenRequest struct { // The number of seconds before the token expires. LifetimeSeconds int64 `json:"lifetime_seconds,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateOboTokenRequest) UnmarshalJSON(b []byte) error { @@ -509,7 +509,7 @@ type CreateOboTokenResponse struct { // Value of the token. TokenValue string `json:"token_value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateOboTokenResponse) UnmarshalJSON(b []byte) error { @@ -573,7 +573,7 @@ type CreateTokenRequest struct { // If the lifetime is not specified, this token remains valid indefinitely. 
LifetimeSeconds int64 `json:"lifetime_seconds,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateTokenRequest) UnmarshalJSON(b []byte) error { @@ -590,7 +590,7 @@ type CreateTokenResponse struct { // The value of the new token. TokenValue string `json:"token_value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateTokenResponse) UnmarshalJSON(b []byte) error { @@ -609,7 +609,7 @@ type CspEnablementAccount struct { // Enforced = it cannot be overriden at workspace level. IsEnforced bool `json:"is_enforced,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CspEnablementAccount) UnmarshalJSON(b []byte) error { @@ -638,7 +638,7 @@ type CspEnablementAccountSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CspEnablementAccountSetting) UnmarshalJSON(b []byte) error { @@ -676,7 +676,7 @@ type DefaultNamespaceSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DefaultNamespaceSetting) UnmarshalJSON(b []byte) error { @@ -698,7 +698,7 @@ type DeleteAccountIpAccessEnableRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteAccountIpAccessEnableRequest) UnmarshalJSON(b []byte) error { @@ -738,7 +738,7 @@ type DeleteAibiDashboardEmbeddingAccessPolicySettingRequest struct { // DELETE request to identify the rule set version you are deleting. 
Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteAibiDashboardEmbeddingAccessPolicySettingRequest) UnmarshalJSON(b []byte) error { @@ -772,7 +772,7 @@ type DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteAibiDashboardEmbeddingApprovedDomainsSettingRequest) UnmarshalJSON(b []byte) error { @@ -806,7 +806,7 @@ type DeleteDefaultNamespaceSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteDefaultNamespaceSettingRequest) UnmarshalJSON(b []byte) error { @@ -840,7 +840,7 @@ type DeleteDisableLegacyAccessRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteDisableLegacyAccessRequest) UnmarshalJSON(b []byte) error { @@ -874,7 +874,7 @@ type DeleteDisableLegacyDbfsRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteDisableLegacyDbfsRequest) UnmarshalJSON(b []byte) error { @@ -908,7 +908,7 @@ type DeleteDisableLegacyFeaturesRequest struct { // DELETE request to identify the rule set version you are deleting. 
Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteDisableLegacyFeaturesRequest) UnmarshalJSON(b []byte) error { @@ -962,7 +962,7 @@ type DeletePersonalComputeSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeletePersonalComputeSettingRequest) UnmarshalJSON(b []byte) error { @@ -1007,7 +1007,7 @@ type DeleteRestrictWorkspaceAdminsSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteRestrictWorkspaceAdminsSettingRequest) UnmarshalJSON(b []byte) error { @@ -1086,7 +1086,7 @@ type DisableLegacyAccess struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DisableLegacyAccess) UnmarshalJSON(b []byte) error { @@ -1114,7 +1114,7 @@ type DisableLegacyDbfs struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DisableLegacyDbfs) UnmarshalJSON(b []byte) error { @@ -1142,7 +1142,7 @@ type DisableLegacyFeatures struct { // instance per workspace. 
SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DisableLegacyFeatures) UnmarshalJSON(b []byte) error { @@ -1192,7 +1192,7 @@ type EgressNetworkPolicyInternetAccessPolicyInternetDestination struct { Type EgressNetworkPolicyInternetAccessPolicyInternetDestinationInternetDestinationType `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EgressNetworkPolicyInternetAccessPolicyInternetDestination) UnmarshalJSON(b []byte) error { @@ -1372,7 +1372,7 @@ type EgressNetworkPolicyInternetAccessPolicyStorageDestination struct { Type EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EgressNetworkPolicyInternetAccessPolicyStorageDestination) UnmarshalJSON(b []byte) error { @@ -1426,7 +1426,7 @@ type Empty struct { type EnhancedSecurityMonitoring struct { IsEnabled bool `json:"is_enabled,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnhancedSecurityMonitoring) UnmarshalJSON(b []byte) error { @@ -1455,7 +1455,7 @@ type EnhancedSecurityMonitoringSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnhancedSecurityMonitoringSetting) UnmarshalJSON(b []byte) error { @@ -1470,7 +1470,7 @@ func (s EnhancedSecurityMonitoringSetting) MarshalJSON() ([]byte, error) { type EsmEnablementAccount struct { IsEnforced bool `json:"is_enforced,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EsmEnablementAccount) UnmarshalJSON(b []byte) error { @@ -1499,7 +1499,7 @@ type EsmEnablementAccountSetting struct { // instance per workspace. 
SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EsmEnablementAccountSetting) UnmarshalJSON(b []byte) error { @@ -1524,7 +1524,7 @@ type ExchangeToken struct { // The type of this exchange token TokenType TokenType `json:"tokenType,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExchangeToken) UnmarshalJSON(b []byte) error { @@ -1570,7 +1570,7 @@ type GenericWebhookConfig struct { // [Output-Only] Whether username is set. UsernameSet bool `json:"username_set,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GenericWebhookConfig) UnmarshalJSON(b []byte) error { @@ -1592,7 +1592,7 @@ type GetAccountIpAccessEnableRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetAccountIpAccessEnableRequest) UnmarshalJSON(b []byte) error { @@ -1620,7 +1620,7 @@ type GetAibiDashboardEmbeddingAccessPolicySettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetAibiDashboardEmbeddingAccessPolicySettingRequest) UnmarshalJSON(b []byte) error { @@ -1642,7 +1642,7 @@ type GetAibiDashboardEmbeddingApprovedDomainsSettingRequest struct { // DELETE request to identify the rule set version you are deleting. 
Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetAibiDashboardEmbeddingApprovedDomainsSettingRequest) UnmarshalJSON(b []byte) error { @@ -1664,7 +1664,7 @@ type GetAutomaticClusterUpdateSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetAutomaticClusterUpdateSettingRequest) UnmarshalJSON(b []byte) error { @@ -1686,7 +1686,7 @@ type GetComplianceSecurityProfileSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetComplianceSecurityProfileSettingRequest) UnmarshalJSON(b []byte) error { @@ -1708,7 +1708,7 @@ type GetCspEnablementAccountSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetCspEnablementAccountSettingRequest) UnmarshalJSON(b []byte) error { @@ -1730,7 +1730,7 @@ type GetDefaultNamespaceSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetDefaultNamespaceSettingRequest) UnmarshalJSON(b []byte) error { @@ -1752,7 +1752,7 @@ type GetDisableLegacyAccessRequest struct { // DELETE request to identify the rule set version you are deleting. 
Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetDisableLegacyAccessRequest) UnmarshalJSON(b []byte) error { @@ -1774,7 +1774,7 @@ type GetDisableLegacyDbfsRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetDisableLegacyDbfsRequest) UnmarshalJSON(b []byte) error { @@ -1796,7 +1796,7 @@ type GetDisableLegacyFeaturesRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetDisableLegacyFeaturesRequest) UnmarshalJSON(b []byte) error { @@ -1818,7 +1818,7 @@ type GetEnhancedSecurityMonitoringSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetEnhancedSecurityMonitoringSettingRequest) UnmarshalJSON(b []byte) error { @@ -1840,7 +1840,7 @@ type GetEsmEnablementAccountSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetEsmEnablementAccountSettingRequest) UnmarshalJSON(b []byte) error { @@ -1889,7 +1889,7 @@ type GetPersonalComputeSettingRequest struct { // DELETE request to identify the rule set version you are deleting. 
Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetPersonalComputeSettingRequest) UnmarshalJSON(b []byte) error { @@ -1919,7 +1919,7 @@ type GetRestrictWorkspaceAdminsSettingRequest struct { // DELETE request to identify the rule set version you are deleting. Etag string `json:"-" url:"etag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetRestrictWorkspaceAdminsSettingRequest) UnmarshalJSON(b []byte) error { @@ -1979,7 +1979,7 @@ type IpAccessListInfo struct { // User ID of the user who updated this list. UpdatedBy int64 `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *IpAccessListInfo) UnmarshalJSON(b []byte) error { @@ -2001,7 +2001,7 @@ type ListNccAzurePrivateEndpointRulesResponse struct { // are no more results to show. NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListNccAzurePrivateEndpointRulesResponse) UnmarshalJSON(b []byte) error { @@ -2017,7 +2017,7 @@ type ListNetworkConnectivityConfigurationsRequest struct { // Pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListNetworkConnectivityConfigurationsRequest) UnmarshalJSON(b []byte) error { @@ -2034,7 +2034,7 @@ type ListNetworkConnectivityConfigurationsResponse struct { // are no more results to show. 
NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListNetworkConnectivityConfigurationsResponse) UnmarshalJSON(b []byte) error { @@ -2051,7 +2051,7 @@ type ListNotificationDestinationsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListNotificationDestinationsRequest) UnmarshalJSON(b []byte) error { @@ -2068,7 +2068,7 @@ type ListNotificationDestinationsResponse struct { Results []ListNotificationDestinationsResult `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListNotificationDestinationsResponse) UnmarshalJSON(b []byte) error { @@ -2088,7 +2088,7 @@ type ListNotificationDestinationsResult struct { // UUID identifying notification destination. Id string `json:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListNotificationDestinationsResult) UnmarshalJSON(b []byte) error { @@ -2106,7 +2106,7 @@ type ListPrivateEndpointRulesRequest struct { // Pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListPrivateEndpointRulesRequest) UnmarshalJSON(b []byte) error { @@ -2129,7 +2129,7 @@ type ListTokenManagementRequest struct { // Username of the user that created the token. CreatedByUsername string `json:"-" url:"created_by_username,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListTokenManagementRequest) UnmarshalJSON(b []byte) error { @@ -2187,7 +2187,7 @@ type MicrosoftTeamsConfig struct { // [Output-Only] Whether URL is set. 
UrlSet bool `json:"url_set,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MicrosoftTeamsConfig) UnmarshalJSON(b []byte) error { @@ -2243,7 +2243,7 @@ type NccAzurePrivateEndpointRule struct { // Time in epoch milliseconds when this object was updated. UpdatedTime int64 `json:"updated_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NccAzurePrivateEndpointRule) UnmarshalJSON(b []byte) error { @@ -2345,7 +2345,7 @@ type NccAzureServiceEndpointRule struct { // The Azure services to which this service endpoint rule applies to. TargetServices []string `json:"target_services,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NccAzureServiceEndpointRule) UnmarshalJSON(b []byte) error { @@ -2410,7 +2410,7 @@ type NetworkConnectivityConfiguration struct { // Time in epoch milliseconds when this object was updated. UpdatedTime int64 `json:"updated_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NetworkConnectivityConfiguration) UnmarshalJSON(b []byte) error { @@ -2434,7 +2434,7 @@ type NotificationDestination struct { // UUID identifying notification destination. Id string `json:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NotificationDestination) UnmarshalJSON(b []byte) error { @@ -2451,7 +2451,7 @@ type PagerdutyConfig struct { // [Output-Only] Whether integration key is set. IntegrationKeySet bool `json:"integration_key_set,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PagerdutyConfig) UnmarshalJSON(b []byte) error { @@ -2467,7 +2467,7 @@ type PartitionId struct { // The ID of the workspace. 
WorkspaceId int64 `json:"workspaceId,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PartitionId) UnmarshalJSON(b []byte) error { @@ -2540,7 +2540,7 @@ type PersonalComputeSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PersonalComputeSetting) UnmarshalJSON(b []byte) error { @@ -2562,7 +2562,7 @@ type PublicTokenInfo struct { // The ID of this token. TokenId string `json:"token_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PublicTokenInfo) UnmarshalJSON(b []byte) error { @@ -2644,7 +2644,7 @@ type RestrictWorkspaceAdminsSetting struct { // instance per workspace. SettingName string `json:"setting_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RestrictWorkspaceAdminsSetting) UnmarshalJSON(b []byte) error { @@ -2672,7 +2672,7 @@ type SlackConfig struct { // [Output-Only] Whether URL is set. UrlSet bool `json:"url_set,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SlackConfig) UnmarshalJSON(b []byte) error { @@ -2687,7 +2687,7 @@ type StringMessage struct { // Represents a generic string value. 
Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StringMessage) UnmarshalJSON(b []byte) error { @@ -2708,7 +2708,7 @@ type TokenAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -2731,7 +2731,7 @@ type TokenAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -2764,7 +2764,7 @@ type TokenInfo struct { // If applicable, the ID of the workspace that the token was created in. WorkspaceId int64 `json:"workspace_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenInfo) UnmarshalJSON(b []byte) error { @@ -2782,7 +2782,7 @@ type TokenPermission struct { // Permission level PermissionLevel TokenPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenPermission) UnmarshalJSON(b []byte) error { @@ -2826,7 +2826,7 @@ type TokenPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenPermissions) UnmarshalJSON(b []byte) error { @@ -2842,7 +2842,7 @@ type TokenPermissionsDescription struct { // Permission level PermissionLevel TokenPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TokenPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -3166,7 +3166,7 @@ type UpdateIpAccessList struct { // excluded even if they are included in an 
allow list. ListType ListType `json:"list_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateIpAccessList) UnmarshalJSON(b []byte) error { @@ -3186,7 +3186,7 @@ type UpdateNotificationDestinationRequest struct { // UUID identifying notification destination. Id string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateNotificationDestinationRequest) UnmarshalJSON(b []byte) error { diff --git a/service/sharing/model.go b/service/sharing/model.go index e77d94b6c..e893acc72 100755 --- a/service/sharing/model.go +++ b/service/sharing/model.go @@ -48,7 +48,7 @@ type CreateProvider struct { // **OAUTH_CLIENT_CREDENTIALS** or not provided. RecipientProfileStr string `json:"recipient_profile_str,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateProvider) UnmarshalJSON(b []byte) error { @@ -86,7 +86,7 @@ type CreateRecipient struct { // only present when the __authentication_type__ is **DATABRICKS**. SharingCode string `json:"sharing_code,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateRecipient) UnmarshalJSON(b []byte) error { @@ -105,7 +105,7 @@ type CreateShare struct { // Storage root URL for the share. StorageRoot string `json:"storage_root,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateShare) UnmarshalJSON(b []byte) error { @@ -166,7 +166,7 @@ type GetRecipientSharePermissionsResponse struct { // An array of data share permissions for a recipient. 
PermissionsOut []ShareToPrivilegeAssignment `json:"permissions_out,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetRecipientSharePermissionsResponse) UnmarshalJSON(b []byte) error { @@ -184,7 +184,7 @@ type GetShareRequest struct { // The name of the share. Name string `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetShareRequest) UnmarshalJSON(b []byte) error { @@ -208,7 +208,7 @@ type ListProviderSharesResponse struct { // An array of provider shares. Shares []ProviderShare `json:"shares,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListProviderSharesResponse) UnmarshalJSON(b []byte) error { @@ -237,7 +237,7 @@ type ListProvidersRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListProvidersRequest) UnmarshalJSON(b []byte) error { @@ -256,7 +256,7 @@ type ListProvidersResponse struct { // An array of provider information objects. Providers []ProviderInfo `json:"providers,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListProvidersResponse) UnmarshalJSON(b []byte) error { @@ -285,7 +285,7 @@ type ListRecipientsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListRecipientsRequest) UnmarshalJSON(b []byte) error { @@ -304,7 +304,7 @@ type ListRecipientsResponse struct { // An array of recipient information objects. 
Recipients []RecipientInfo `json:"recipients,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListRecipientsResponse) UnmarshalJSON(b []byte) error { @@ -332,7 +332,7 @@ type ListSharesRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSharesRequest) UnmarshalJSON(b []byte) error { @@ -351,7 +351,7 @@ type ListSharesResponse struct { // An array of data share information objects. Shares []ShareInfo `json:"shares,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListSharesResponse) UnmarshalJSON(b []byte) error { @@ -386,7 +386,7 @@ type PartitionValue struct { // not be set. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PartitionValue) UnmarshalJSON(b []byte) error { @@ -543,7 +543,7 @@ type PrivilegeAssignment struct { // The privileges assigned to the principal. Privileges []Privilege `json:"privileges,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PrivilegeAssignment) UnmarshalJSON(b []byte) error { @@ -591,7 +591,7 @@ type ProviderInfo struct { // Username of user who last modified Provider. UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ProviderInfo) UnmarshalJSON(b []byte) error { @@ -606,7 +606,7 @@ type ProviderShare struct { // The name of the Provider Share. Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ProviderShare) UnmarshalJSON(b []byte) error { @@ -669,7 +669,7 @@ type RecipientInfo struct { // Username of recipient updater. 
UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RecipientInfo) UnmarshalJSON(b []byte) error { @@ -688,7 +688,7 @@ type RecipientProfile struct { // The version number of the recipient's credentials on a share. ShareCredentialsVersion int `json:"share_credentials_version,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RecipientProfile) UnmarshalJSON(b []byte) error { @@ -716,7 +716,7 @@ type RecipientTokenInfo struct { // Username of recipient token updater. UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RecipientTokenInfo) UnmarshalJSON(b []byte) error { @@ -743,7 +743,7 @@ type RetrieveTokenResponse struct { // These field names must follow the delta sharing protocol. ShareCredentialsVersion int `json:"shareCredentialsVersion,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RetrieveTokenResponse) UnmarshalJSON(b []byte) error { @@ -793,7 +793,7 @@ type ShareInfo struct { // Username of share updater. UpdatedBy string `json:"updated_by,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ShareInfo) UnmarshalJSON(b []byte) error { @@ -821,7 +821,7 @@ type SharePermissionsRequest struct { // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SharePermissionsRequest) UnmarshalJSON(b []byte) error { @@ -838,7 +838,7 @@ type ShareToPrivilegeAssignment struct { // The share name. 
ShareName string `json:"share_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ShareToPrivilegeAssignment) UnmarshalJSON(b []byte) error { @@ -898,7 +898,7 @@ type SharedDataObject struct { // file name. StringSharedAs string `json:"string_shared_as,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SharedDataObject) UnmarshalJSON(b []byte) error { @@ -1061,7 +1061,7 @@ type UpdateProvider struct { // **OAUTH_CLIENT_CREDENTIALS** or not provided. RecipientProfileStr string `json:"recipient_profile_str,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateProvider) UnmarshalJSON(b []byte) error { @@ -1091,7 +1091,7 @@ type UpdateRecipient struct { // read-modify-write. PropertiesKvpairs *SecurablePropertiesKvPairs `json:"properties_kvpairs,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateRecipient) UnmarshalJSON(b []byte) error { @@ -1116,7 +1116,7 @@ type UpdateShare struct { // Array of shared data object updates. Updates []SharedDataObjectUpdate `json:"updates,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateShare) UnmarshalJSON(b []byte) error { @@ -1145,7 +1145,7 @@ type UpdateSharePermissions struct { // Opaque pagination token to go to next page based on previous query. 
PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateSharePermissions) UnmarshalJSON(b []byte) error { diff --git a/service/sql/model.go b/service/sql/model.go index f74768bba..56b0e2bfa 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -16,7 +16,7 @@ type AccessControl struct { UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AccessControl) UnmarshalJSON(b []byte) error { @@ -72,7 +72,7 @@ type Alert struct { // The timestamp indicating when the alert was updated. UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Alert) UnmarshalJSON(b []byte) error { @@ -106,7 +106,7 @@ type AlertConditionThreshold struct { type AlertOperandColumn struct { Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AlertOperandColumn) UnmarshalJSON(b []byte) error { @@ -124,7 +124,7 @@ type AlertOperandValue struct { StringValue string `json:"string_value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AlertOperandValue) UnmarshalJSON(b []byte) error { @@ -199,7 +199,7 @@ type AlertOptions struct { // strings (eg. 'foobar'), floats (eg. 123.4), and booleans (true). Value any `json:"value"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AlertOptions) UnmarshalJSON(b []byte) error { @@ -281,7 +281,7 @@ type AlertQuery struct { // The ID of the user who owns the query. 
UserId int `json:"user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *AlertQuery) UnmarshalJSON(b []byte) error { @@ -335,7 +335,7 @@ type BaseChunkInfo struct { // The starting row offset within the result set. RowOffset int64 `json:"row_offset,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *BaseChunkInfo) UnmarshalJSON(b []byte) error { @@ -363,7 +363,7 @@ type Channel struct { Name ChannelName `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Channel) UnmarshalJSON(b []byte) error { @@ -381,7 +381,7 @@ type ChannelInfo struct { // Name of the channel Name ChannelName `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ChannelInfo) UnmarshalJSON(b []byte) error { @@ -444,7 +444,7 @@ type ClientConfig struct { HidePlotlyModeBar bool `json:"hide_plotly_mode_bar,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ClientConfig) UnmarshalJSON(b []byte) error { @@ -474,7 +474,7 @@ type ColumnInfo struct { // The full SQL type specification. TypeText string `json:"type_text,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ColumnInfo) UnmarshalJSON(b []byte) error { @@ -562,7 +562,7 @@ type CreateAlert struct { // again. Rearm int `json:"rearm,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateAlert) UnmarshalJSON(b []byte) error { @@ -604,7 +604,7 @@ type CreateAlertRequestAlert struct { // the alert will not be triggered again. 
SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateAlertRequestAlert) UnmarshalJSON(b []byte) error { @@ -645,7 +645,7 @@ type CreateQueryRequestQuery struct { // ID of the SQL warehouse attached to the query. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateQueryRequestQuery) UnmarshalJSON(b []byte) error { @@ -673,7 +673,7 @@ type CreateQueryVisualizationsLegacyRequest struct { // The type of visualization: chart, table, pivot table, and so on. Type string `json:"type"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateQueryVisualizationsLegacyRequest) UnmarshalJSON(b []byte) error { @@ -704,7 +704,7 @@ type CreateVisualizationRequestVisualization struct { // The type of visualization: counter, table, funnel, and so on. Type string `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateVisualizationRequestVisualization) UnmarshalJSON(b []byte) error { @@ -778,7 +778,7 @@ type CreateWarehouseRequest struct { // `enable_serverless_compute` to `true`. WarehouseType CreateWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateWarehouseRequest) UnmarshalJSON(b []byte) error { @@ -825,7 +825,7 @@ type CreateWarehouseResponse struct { // Id for the SQL warehouse. This value is unique across all SQL warehouses. 
Id string `json:"id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateWarehouseResponse) UnmarshalJSON(b []byte) error { @@ -852,7 +852,7 @@ type CreateWidget struct { // Width of a widget Width int `json:"width"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateWidget) UnmarshalJSON(b []byte) error { @@ -911,7 +911,7 @@ type Dashboard struct { Widgets []Widget `json:"widgets,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Dashboard) UnmarshalJSON(b []byte) error { @@ -934,7 +934,7 @@ type DashboardEditContent struct { Tags []string `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DashboardEditContent) UnmarshalJSON(b []byte) error { @@ -951,7 +951,7 @@ type DashboardOptions struct { // thirty days. MovedToTrashAt string `json:"moved_to_trash_at,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DashboardOptions) UnmarshalJSON(b []byte) error { @@ -980,7 +980,7 @@ type DashboardPostContent struct { Tags []string `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DashboardPostContent) UnmarshalJSON(b []byte) error { @@ -1018,7 +1018,7 @@ type DataSource struct { // a SQL warehouse. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DataSource) UnmarshalJSON(b []byte) error { @@ -1075,7 +1075,7 @@ type DateRangeValue struct { StartDayOfWeek int `json:"start_day_of_week,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DateRangeValue) UnmarshalJSON(b []byte) error { @@ -1152,7 +1152,7 @@ type DateValue struct { // Defaults to DAY_PRECISION (YYYY-MM-DD). 
Precision DatePrecision `json:"precision,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DateValue) UnmarshalJSON(b []byte) error { @@ -1274,7 +1274,7 @@ type EditAlert struct { // again. Rearm int `json:"rearm,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EditAlert) UnmarshalJSON(b []byte) error { @@ -1348,7 +1348,7 @@ type EditWarehouseRequest struct { // `enable_serverless_compute` to `true`. WarehouseType EditWarehouseRequestWarehouseType `json:"warehouse_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EditWarehouseRequest) UnmarshalJSON(b []byte) error { @@ -1404,7 +1404,7 @@ type EndpointConfPair struct { Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointConfPair) UnmarshalJSON(b []byte) error { @@ -1429,7 +1429,7 @@ type EndpointHealth struct { // warehouses. Summary string `json:"summary,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointHealth) UnmarshalJSON(b []byte) error { @@ -1516,7 +1516,7 @@ type EndpointInfo struct { // `enable_serverless_compute` to `true`. WarehouseType EndpointInfoWarehouseType `json:"warehouse_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointInfo) UnmarshalJSON(b []byte) error { @@ -1564,7 +1564,7 @@ type EndpointTagPair struct { Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointTagPair) UnmarshalJSON(b []byte) error { @@ -1587,7 +1587,7 @@ type EnumValue struct { // List of selected query parameter values. 
Values []string `json:"values,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EnumValue) UnmarshalJSON(b []byte) error { @@ -1723,7 +1723,7 @@ type ExecuteStatementRequest struct { // [What are SQL warehouses?]: https://docs.databricks.com/sql/admin/warehouse-type.html WarehouseId string `json:"warehouse_id"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExecuteStatementRequest) UnmarshalJSON(b []byte) error { @@ -1800,7 +1800,7 @@ type ExternalLink struct { // The starting row offset within the result set. RowOffset int64 `json:"row_offset,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExternalLink) UnmarshalJSON(b []byte) error { @@ -1880,7 +1880,7 @@ type GetResponse struct { // A singular noun object type. ObjectType ObjectType `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetResponse) UnmarshalJSON(b []byte) error { @@ -2005,7 +2005,7 @@ type GetWarehouseResponse struct { // `enable_serverless_compute` to `true`. 
WarehouseType GetWarehouseResponseWarehouseType `json:"warehouse_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetWarehouseResponse) UnmarshalJSON(b []byte) error { @@ -2075,7 +2075,7 @@ type GetWorkspaceWarehouseConfigResponse struct { // SQL configuration parameters SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetWorkspaceWarehouseConfigResponse) UnmarshalJSON(b []byte) error { @@ -2144,7 +2144,7 @@ type LegacyAlert struct { User *User `json:"user,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LegacyAlert) UnmarshalJSON(b []byte) error { @@ -2257,7 +2257,7 @@ type LegacyQuery struct { Visualizations []LegacyVisualization `json:"visualizations,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LegacyQuery) UnmarshalJSON(b []byte) error { @@ -2294,7 +2294,7 @@ type LegacyVisualization struct { UpdatedAt string `json:"updated_at,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *LegacyVisualization) UnmarshalJSON(b []byte) error { @@ -2338,7 +2338,7 @@ type ListAlertsRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAlertsRequest) UnmarshalJSON(b []byte) error { @@ -2354,7 +2354,7 @@ type ListAlertsResponse struct { Results []ListAlertsResponseAlert `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAlertsResponse) UnmarshalJSON(b []byte) error { @@ -2408,7 +2408,7 @@ type ListAlertsResponseAlert struct { // The timestamp indicating when the alert was updated. 
UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListAlertsResponseAlert) UnmarshalJSON(b []byte) error { @@ -2430,7 +2430,7 @@ type ListDashboardsRequest struct { // Full text search term. Q string `json:"-" url:"q,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListDashboardsRequest) UnmarshalJSON(b []byte) error { @@ -2492,7 +2492,7 @@ type ListQueriesLegacyRequest struct { // Full text search term Q string `json:"-" url:"q,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQueriesLegacyRequest) UnmarshalJSON(b []byte) error { @@ -2509,7 +2509,7 @@ type ListQueriesRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQueriesRequest) UnmarshalJSON(b []byte) error { @@ -2528,7 +2528,7 @@ type ListQueriesResponse struct { Res []QueryInfo `json:"res,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQueriesResponse) UnmarshalJSON(b []byte) error { @@ -2555,7 +2555,7 @@ type ListQueryHistoryRequest struct { // optional. PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQueryHistoryRequest) UnmarshalJSON(b []byte) error { @@ -2571,7 +2571,7 @@ type ListQueryObjectsResponse struct { Results []ListQueryObjectsResponseQuery `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQueryObjectsResponse) UnmarshalJSON(b []byte) error { @@ -2618,7 +2618,7 @@ type ListQueryObjectsResponseQuery struct { // ID of the SQL warehouse attached to the query. 
WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListQueryObjectsResponseQuery) UnmarshalJSON(b []byte) error { @@ -2639,7 +2639,7 @@ type ListResponse struct { // List of dashboards returned. Results []Dashboard `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListResponse) UnmarshalJSON(b []byte) error { @@ -2658,7 +2658,7 @@ type ListVisualizationsForQueryRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListVisualizationsForQueryRequest) UnmarshalJSON(b []byte) error { @@ -2674,7 +2674,7 @@ type ListVisualizationsForQueryResponse struct { Results []Visualization `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListVisualizationsForQueryResponse) UnmarshalJSON(b []byte) error { @@ -2691,7 +2691,7 @@ type ListWarehousesRequest struct { // not specified, the user from the session header is used. RunAsUserId int `json:"-" url:"run_as_user_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListWarehousesRequest) UnmarshalJSON(b []byte) error { @@ -2716,7 +2716,7 @@ type MultiValuesOptions struct { // Character that suffixes each selected parameter value. 
Suffix string `json:"suffix,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MultiValuesOptions) UnmarshalJSON(b []byte) error { @@ -2730,7 +2730,7 @@ func (s MultiValuesOptions) MarshalJSON() ([]byte, error) { type NumericValue struct { Value float64 `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *NumericValue) UnmarshalJSON(b []byte) error { @@ -2814,7 +2814,7 @@ type OdbcParams struct { Protocol string `json:"protocol,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *OdbcParams) UnmarshalJSON(b []byte) error { @@ -2875,7 +2875,7 @@ type Parameter struct { // The default value for this parameter. Value any `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Parameter) UnmarshalJSON(b []byte) error { @@ -3031,7 +3031,7 @@ type Query struct { // ID of the SQL warehouse attached to the query. WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Query) UnmarshalJSON(b []byte) error { @@ -3050,7 +3050,7 @@ type QueryBackedValue struct { // List of selected query parameter values. Values []string `json:"values,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryBackedValue) UnmarshalJSON(b []byte) error { @@ -3088,7 +3088,7 @@ type QueryEditContent struct { Tags []string `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryEditContent) UnmarshalJSON(b []byte) error { @@ -3164,7 +3164,7 @@ type QueryInfo struct { // Warehouse ID. 
WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryInfo) UnmarshalJSON(b []byte) error { @@ -3185,7 +3185,7 @@ type QueryList struct { // List of queries returned. Results []LegacyQuery `json:"results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryList) UnmarshalJSON(b []byte) error { @@ -3256,7 +3256,7 @@ type QueryMetrics struct { // tenant, in bytes. WriteRemoteBytes int64 `json:"write_remote_bytes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryMetrics) UnmarshalJSON(b []byte) error { @@ -3279,7 +3279,7 @@ type QueryOptions struct { // The name of the schema to execute this query in. Schema string `json:"schema,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryOptions) UnmarshalJSON(b []byte) error { @@ -3311,7 +3311,7 @@ type QueryParameter struct { // Text displayed in the user-facing parameter widget in the UI. Title string `json:"title,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryParameter) UnmarshalJSON(b []byte) error { @@ -3349,7 +3349,7 @@ type QueryPostContent struct { Tags []string `json:"tags,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryPostContent) UnmarshalJSON(b []byte) error { @@ -3513,7 +3513,7 @@ type ResultData struct { // The starting row offset within the result set. RowOffset int64 `json:"row_offset,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResultData) UnmarshalJSON(b []byte) error { @@ -3543,7 +3543,7 @@ type ResultManifest struct { // `byte_limit`. 
Truncated bool `json:"truncated,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResultManifest) UnmarshalJSON(b []byte) error { @@ -3560,7 +3560,7 @@ type ResultSchema struct { Columns []ColumnInfo `json:"columns,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResultSchema) UnmarshalJSON(b []byte) error { @@ -3633,7 +3633,7 @@ type ServiceError struct { // A brief summary of the error condition. Message string `json:"message,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ServiceError) UnmarshalJSON(b []byte) error { @@ -3712,7 +3712,7 @@ type SetResponse struct { // A singular noun object type. ObjectType ObjectType `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SetResponse) UnmarshalJSON(b []byte) error { @@ -3750,7 +3750,7 @@ type SetWorkspaceWarehouseConfigRequest struct { // SQL configuration parameters SqlConfigurationParameters *RepeatedEndpointConfPairs `json:"sql_configuration_parameters,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SetWorkspaceWarehouseConfigRequest) UnmarshalJSON(b []byte) error { @@ -3884,7 +3884,7 @@ type StatementParameterListItem struct { // is interpreted as NULL. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StatementParameterListItem) UnmarshalJSON(b []byte) error { @@ -3907,7 +3907,7 @@ type StatementResponse struct { // information. 
Status *StatementStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *StatementResponse) UnmarshalJSON(b []byte) error { @@ -4278,7 +4278,7 @@ func (f *TerminationReasonType) Type() string { type TextValue struct { Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TextValue) UnmarshalJSON(b []byte) error { @@ -4295,7 +4295,7 @@ type TimeRange struct { // The start time in milliseconds. StartTimeMs int64 `json:"start_time_ms,omitempty" url:"start_time_ms,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TimeRange) UnmarshalJSON(b []byte) error { @@ -4310,7 +4310,7 @@ type TransferOwnershipObjectId struct { // Email address for the new owner, who must exist in the workspace. NewOwner string `json:"new_owner,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TransferOwnershipObjectId) UnmarshalJSON(b []byte) error { @@ -4330,7 +4330,7 @@ type TransferOwnershipRequest struct { // The type of object on which to change ownership. ObjectType OwnableObjectType `json:"-" url:"-"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *TransferOwnershipRequest) UnmarshalJSON(b []byte) error { @@ -4397,7 +4397,7 @@ type UpdateAlertRequestAlert struct { // the alert will not be triggered again. SecondsToRetrigger int `json:"seconds_to_retrigger,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateAlertRequestAlert) UnmarshalJSON(b []byte) error { @@ -4452,7 +4452,7 @@ type UpdateQueryRequestQuery struct { // ID of the SQL warehouse attached to the query. 
WarehouseId string `json:"warehouse_id,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateQueryRequestQuery) UnmarshalJSON(b []byte) error { @@ -4498,7 +4498,7 @@ type UpdateVisualizationRequestVisualization struct { // The type of visualization: counter, table, funnel, and so on. Type string `json:"type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateVisualizationRequestVisualization) UnmarshalJSON(b []byte) error { @@ -4516,7 +4516,7 @@ type User struct { Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *User) UnmarshalJSON(b []byte) error { @@ -4549,7 +4549,7 @@ type Visualization struct { // The timestamp indicating when the visualization was updated. UpdateTime string `json:"update_time,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Visualization) UnmarshalJSON(b []byte) error { @@ -4570,7 +4570,7 @@ type WarehouseAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WarehouseAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -4593,7 +4593,7 @@ type WarehouseAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WarehouseAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -4611,7 +4611,7 @@ type WarehousePermission struct { // Permission level PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WarehousePermission) UnmarshalJSON(b []byte) error { @@ -4661,7 +4661,7 @@ type WarehousePermissions struct 
{ ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WarehousePermissions) UnmarshalJSON(b []byte) error { @@ -4677,7 +4677,7 @@ type WarehousePermissionsDescription struct { // Permission level PermissionLevel WarehousePermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WarehousePermissionsDescription) UnmarshalJSON(b []byte) error { @@ -4701,7 +4701,7 @@ type WarehouseTypePair struct { // Warehouse type: `PRO` or `CLASSIC`. WarehouseType WarehouseTypePairWarehouseType `json:"warehouse_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WarehouseTypePair) UnmarshalJSON(b []byte) error { @@ -4756,7 +4756,7 @@ type Widget struct { // Unused field. Width int `json:"width,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Widget) UnmarshalJSON(b []byte) error { @@ -4786,7 +4786,7 @@ type WidgetOptions struct { // Timestamp of the last time this object was updated. UpdatedAt string `json:"updated_at,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WidgetOptions) UnmarshalJSON(b []byte) error { @@ -4811,7 +4811,7 @@ type WidgetPosition struct { // height of the widget measured in dashboard grid cells SizeY int `json:"sizeY,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WidgetPosition) UnmarshalJSON(b []byte) error { diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index 48335536a..a2aa845a0 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -12,7 +12,7 @@ type ColumnInfo struct { // Name of the column. 
Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ColumnInfo) UnmarshalJSON(b []byte) error { @@ -64,7 +64,7 @@ type DeleteDataResult struct { // Count of successfully processed rows. SuccessRowCount int64 `json:"success_row_count,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeleteDataResult) UnmarshalJSON(b []byte) error { @@ -167,7 +167,7 @@ type DeltaSyncVectorIndexSpecRequest struct { // The name of the source table. SourceTable string `json:"source_table,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeltaSyncVectorIndexSpecRequest) UnmarshalJSON(b []byte) error { @@ -200,7 +200,7 @@ type DeltaSyncVectorIndexSpecResponse struct { // The name of the source table. SourceTable string `json:"source_table,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DeltaSyncVectorIndexSpecResponse) UnmarshalJSON(b []byte) error { @@ -224,7 +224,7 @@ type DirectAccessVectorIndexSpec struct { // Supported types for vector column: `array`, `array`,`. 
SchemaJson string `json:"schema_json,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *DirectAccessVectorIndexSpec) UnmarshalJSON(b []byte) error { @@ -241,7 +241,7 @@ type EmbeddingSourceColumn struct { // Name of the column Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EmbeddingSourceColumn) UnmarshalJSON(b []byte) error { @@ -258,7 +258,7 @@ type EmbeddingVectorColumn struct { // Name of the column Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EmbeddingVectorColumn) UnmarshalJSON(b []byte) error { @@ -289,7 +289,7 @@ type EndpointInfo struct { // Number of indexes on the endpoint NumIndexes int `json:"num_indexes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointInfo) UnmarshalJSON(b []byte) error { @@ -307,7 +307,7 @@ type EndpointStatus struct { // Current state of the endpoint State EndpointStatusState `json:"state,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *EndpointStatus) UnmarshalJSON(b []byte) error { @@ -393,7 +393,7 @@ type ListEndpointResponse struct { // there are no more results to show. 
NextPageToken string `json:"next_page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListEndpointResponse) UnmarshalJSON(b []byte) error { @@ -409,7 +409,7 @@ type ListEndpointsRequest struct { // Token for pagination PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListEndpointsRequest) UnmarshalJSON(b []byte) error { @@ -427,7 +427,7 @@ type ListIndexesRequest struct { // Token for pagination PageToken string `json:"-" url:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListIndexesRequest) UnmarshalJSON(b []byte) error { @@ -449,7 +449,7 @@ type ListVectorIndexesResponse struct { VectorIndexes []MiniVectorIndex `json:"vector_indexes,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListVectorIndexesResponse) UnmarshalJSON(b []byte) error { @@ -467,7 +467,7 @@ type MapStringValueEntry struct { // Column value, nullable. Value *Value `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MapStringValueEntry) UnmarshalJSON(b []byte) error { @@ -496,7 +496,7 @@ type MiniVectorIndex struct { // Primary key of the index PrimaryKey string `json:"primary_key,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *MiniVectorIndex) UnmarshalJSON(b []byte) error { @@ -558,7 +558,7 @@ type QueryVectorIndexNextPageRequest struct { // `QueryVectorIndexNextPage` API. 
PageToken string `json:"page_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryVectorIndexNextPageRequest) UnmarshalJSON(b []byte) error { @@ -593,7 +593,7 @@ type QueryVectorIndexRequest struct { // Threshold for the approximate nearest neighbor search. Defaults to 0.0. ScoreThreshold float64 `json:"score_threshold,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryVectorIndexRequest) UnmarshalJSON(b []byte) error { @@ -614,7 +614,7 @@ type QueryVectorIndexResponse struct { // Data returned in the query result. Result *ResultData `json:"result,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *QueryVectorIndexResponse) UnmarshalJSON(b []byte) error { @@ -632,7 +632,7 @@ type ResultData struct { // Number of rows in the result set. RowCount int `json:"row_count,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResultData) UnmarshalJSON(b []byte) error { @@ -650,7 +650,7 @@ type ResultManifest struct { // Information about each column in the result set. Columns []ColumnInfo `json:"columns,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ResultManifest) UnmarshalJSON(b []byte) error { @@ -670,7 +670,7 @@ type ScanVectorIndexRequest struct { // Number of results to return. Defaults to 10. NumResults int `json:"num_results,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ScanVectorIndexRequest) UnmarshalJSON(b []byte) error { @@ -688,7 +688,7 @@ type ScanVectorIndexResponse struct { // Primary key of the last entry. 
LastPrimaryKey string `json:"last_primary_key,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ScanVectorIndexResponse) UnmarshalJSON(b []byte) error { @@ -720,7 +720,7 @@ type UpsertDataResult struct { // Count of successfully processed rows. SuccessRowCount int64 `json:"success_row_count,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpsertDataResult) UnmarshalJSON(b []byte) error { @@ -791,7 +791,7 @@ type Value struct { StructValue *Struct `json:"struct_value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Value) UnmarshalJSON(b []byte) error { @@ -826,7 +826,7 @@ type VectorIndex struct { Status *VectorIndexStatus `json:"status,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *VectorIndex) UnmarshalJSON(b []byte) error { @@ -847,7 +847,7 @@ type VectorIndexStatus struct { // Whether the index is ready for search Ready bool `json:"ready,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *VectorIndexStatus) UnmarshalJSON(b []byte) error { diff --git a/service/workspace/model.go b/service/workspace/model.go index dbc17017e..03dbb05b7 100755 --- a/service/workspace/model.go +++ b/service/workspace/model.go @@ -73,7 +73,7 @@ type CreateCredentialsRequest struct { // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html PersonalAccessToken string `json:"personal_access_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCredentialsRequest) UnmarshalJSON(b []byte) error { @@ -93,7 +93,7 @@ type CreateCredentialsResponse struct { // associated with the credential. 
GitUsername string `json:"git_username,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateCredentialsResponse) UnmarshalJSON(b []byte) error { @@ -120,7 +120,7 @@ type CreateRepoRequest struct { // URL of the Git repository to be linked. Url string `json:"url"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateRepoRequest) UnmarshalJSON(b []byte) error { @@ -148,7 +148,7 @@ type CreateRepoResponse struct { // URL of the linked Git repository. Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateRepoResponse) UnmarshalJSON(b []byte) error { @@ -171,7 +171,7 @@ type CreateScope struct { // default to `DATABRICKS` ScopeBackendType ScopeBackendType `json:"scope_backend_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CreateScope) UnmarshalJSON(b []byte) error { @@ -194,7 +194,7 @@ type CredentialInfo struct { // associated with the credential. GitUsername string `json:"git_username,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *CredentialInfo) UnmarshalJSON(b []byte) error { @@ -214,7 +214,7 @@ type Delete struct { // deleted and cannot be undone. Recursive bool `json:"recursive,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Delete) UnmarshalJSON(b []byte) error { @@ -338,7 +338,7 @@ type ExportResponse struct { // The file type of the exported file. FileType string `json:"file_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ExportResponse) UnmarshalJSON(b []byte) error { @@ -372,7 +372,7 @@ type GetCredentialsResponse struct { // associated with the credential. 
GitUsername string `json:"git_username,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetCredentialsResponse) UnmarshalJSON(b []byte) error { @@ -422,7 +422,7 @@ type GetRepoResponse struct { // URL of the linked Git repository. Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetRepoResponse) UnmarshalJSON(b []byte) error { @@ -447,7 +447,7 @@ type GetSecretResponse struct { // The value of the secret in its byte representation. Value string `json:"value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *GetSecretResponse) UnmarshalJSON(b []byte) error { @@ -517,7 +517,7 @@ type Import struct { // only supported for the `DBC` and `SOURCE` formats. Path string `json:"path"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *Import) UnmarshalJSON(b []byte) error { @@ -647,7 +647,7 @@ type ListReposRequest struct { // `/Workspace`) Git folders (repos) from `/Workspace/Repos` will be served. PathPrefix string `json:"-" url:"path_prefix,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListReposRequest) UnmarshalJSON(b []byte) error { @@ -665,7 +665,7 @@ type ListReposResponse struct { // List of Git folders (repos). Repos []RepoInfo `json:"repos,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListReposResponse) UnmarshalJSON(b []byte) error { @@ -704,7 +704,7 @@ type ListWorkspaceRequest struct { // The absolute path of the notebook or directory. Path string `json:"-" url:"path"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ListWorkspaceRequest) UnmarshalJSON(b []byte) error { @@ -749,7 +749,7 @@ type ObjectInfo struct { // Only applicable to files. 
The file size in bytes can be returned. Size int64 `json:"size,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *ObjectInfo) UnmarshalJSON(b []byte) error { @@ -828,7 +828,7 @@ type PutSecret struct { // If specified, note that the value will be stored in UTF-8 (MB4) form. StringValue string `json:"string_value,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *PutSecret) UnmarshalJSON(b []byte) error { @@ -852,7 +852,7 @@ type RepoAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepoAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -875,7 +875,7 @@ type RepoAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepoAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -903,7 +903,7 @@ type RepoInfo struct { // URL of the remote git repository. 
Url string `json:"url,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepoInfo) UnmarshalJSON(b []byte) error { @@ -921,7 +921,7 @@ type RepoPermission struct { // Permission level PermissionLevel RepoPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepoPermission) UnmarshalJSON(b []byte) error { @@ -971,7 +971,7 @@ type RepoPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepoPermissions) UnmarshalJSON(b []byte) error { @@ -987,7 +987,7 @@ type RepoPermissionsDescription struct { // Permission level PermissionLevel RepoPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *RepoPermissionsDescription) UnmarshalJSON(b []byte) error { @@ -1037,7 +1037,7 @@ type SecretMetadata struct { // The last updated timestamp (in milliseconds) for the secret. LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SecretMetadata) UnmarshalJSON(b []byte) error { @@ -1056,7 +1056,7 @@ type SecretScope struct { // A unique name to identify the secret scope. 
Name string `json:"name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *SecretScope) UnmarshalJSON(b []byte) error { @@ -1108,7 +1108,7 @@ type UpdateCredentialsRequest struct { // [Learn more]: https://docs.databricks.com/repos/get-access-tokens-from-git-provider.html PersonalAccessToken string `json:"personal_access_token,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateCredentialsRequest) UnmarshalJSON(b []byte) error { @@ -1136,7 +1136,7 @@ type UpdateRepoRequest struct { // HEAD. Tag string `json:"tag,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *UpdateRepoRequest) UnmarshalJSON(b []byte) error { @@ -1160,7 +1160,7 @@ type WorkspaceObjectAccessControlRequest struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WorkspaceObjectAccessControlRequest) UnmarshalJSON(b []byte) error { @@ -1183,7 +1183,7 @@ type WorkspaceObjectAccessControlResponse struct { // name of the user UserName string `json:"user_name,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WorkspaceObjectAccessControlResponse) UnmarshalJSON(b []byte) error { @@ -1201,7 +1201,7 @@ type WorkspaceObjectPermission struct { // Permission level PermissionLevel WorkspaceObjectPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WorkspaceObjectPermission) UnmarshalJSON(b []byte) error { @@ -1251,7 +1251,7 @@ type WorkspaceObjectPermissions struct { ObjectType string `json:"object_type,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WorkspaceObjectPermissions) UnmarshalJSON(b []byte) error { @@ -1267,7 
+1267,7 @@ type WorkspaceObjectPermissionsDescription struct { // Permission level PermissionLevel WorkspaceObjectPermissionLevel `json:"permission_level,omitempty"` - ForceSendFields []string `json:"-"` + ForceSendFields []string `json:"-" url:"-"` } func (s *WorkspaceObjectPermissionsDescription) UnmarshalJSON(b []byte) error { diff --git a/version/version.go b/version/version.go index 4a840112f..18a61d2b0 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.58.0" +const Version = "0.58.1" From 43c3d0ed6a63888f0002ba18f0476cffdcad17e4 Mon Sep 17 00:00:00 2001 From: Giorgi Kikolashvili <47174341+gkiko10@users.noreply.github.com> Date: Thu, 13 Feb 2025 15:19:56 +0100 Subject: [PATCH 16/54] [Internal] GetRun logic paginates more arrays (#1132) ## What changes are proposed in this pull request? The existing code only paginates tasks and iterations. With this PR we update the logic to also paginate job_clusters, job_parameters and repair_history. This changes are needed for [Jobs API 2.2](https://docs.databricks.com/api/workspace/jobs/getrun) compatibility. ## How is this tested? I enabled API 2.2 calls by modifying URL string `/api/2.2/jobs/runs/get` in service/jobs/impl.go. Then I ran unit test from service/jobs/ext_api_test.go --- service/jobs/ext_api.go | 12 +++++--- service/jobs/ext_api_test.go | 54 ++++++++++++++++++++++++++++++++---- 2 files changed, 57 insertions(+), 9 deletions(-) diff --git a/service/jobs/ext_api.go b/service/jobs/ext_api.go index 0b8f3ee80..e7ea090c3 100644 --- a/service/jobs/ext_api.go +++ b/service/jobs/ext_api.go @@ -14,9 +14,9 @@ func (a *JobsAPI) GetRun(ctx context.Context, request GetRunRequest) (*Run, erro // When querying a ForEach task run, a page token is returned when there are more than 100 iterations. Only a single task is returned, corresponding to the ForEach task itself. 
Therefore, the client only reads the iterations from the next page and not the tasks. isPaginatingIterations := len(run.Iterations) > 0 - pageToken := run.NextPageToken - for pageToken != "" { - request.PageToken = pageToken + // runs/get response includes next_page_token as long as there are more pages to fetch. + for run.NextPageToken != "" { + request.PageToken = run.NextPageToken nextRun, err := a.jobsImpl.GetRun(ctx, request) if err != nil { return nil, err @@ -27,7 +27,11 @@ func (a *JobsAPI) GetRun(ctx context.Context, request GetRunRequest) (*Run, erro } else { run.Tasks = append(run.Tasks, nextRun.Tasks...) } - pageToken = nextRun.NextPageToken + // Each new page of runs/get response includes the next page of the job_clusters, job_parameters, and repair history. + run.JobClusters = append(run.JobClusters, nextRun.JobClusters...) + run.JobParameters = append(run.JobParameters, nextRun.JobParameters...) + run.RepairHistory = append(run.RepairHistory, nextRun.RepairHistory...) + run.NextPageToken = nextRun.NextPageToken } return run, nil diff --git a/service/jobs/ext_api_test.go b/service/jobs/ext_api_test.go index e13a437f8..acec84215 100644 --- a/service/jobs/ext_api_test.go +++ b/service/jobs/ext_api_test.go @@ -29,7 +29,6 @@ func TestGetRun(t *testing.T) { TaskKey: "task2", }, }, - NextPageToken: "", }, }, { @@ -175,7 +174,7 @@ func TestGetRun(t *testing.T) { assert.Equal(t, expected, run.Tasks) }) - t.Run("clusters array is not increased when paginated", func(t *testing.T) { + t.Run("clusters array is also paginated", func(t *testing.T) { var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ { Method: "GET", @@ -199,6 +198,16 @@ func TestGetRun(t *testing.T) { JobClusterKey: "cluster2", }, }, + JobParameters: []JobParameter{ + { + Name: "key1", + Value: "value1", + }, + { + Name: "key2", + Value: "value2", + }, + }, NextPageToken: "token1", }, }, @@ -218,10 +227,17 @@ func TestGetRun(t *testing.T) { }, JobClusters: []JobCluster{ { - JobClusterKey: 
"cluster1", + JobClusterKey: "cluster3", }, + }, + JobParameters: []JobParameter{ { - JobClusterKey: "cluster2", + Name: "key3", + Value: "value3", + }, + { + Name: "key4", + Value: "value4", }, }, }, @@ -253,6 +269,27 @@ func TestGetRun(t *testing.T) { { JobClusterKey: "cluster2", }, + { + JobClusterKey: "cluster3", + }, + }, + JobParameters: []JobParameter{ + { + Name: "key1", + Value: "value1", + }, + { + Name: "key2", + Value: "value2", + }, + { + Name: "key3", + Value: "value3", + }, + { + Name: "key4", + Value: "value4", + }, }, }, }, @@ -269,9 +306,16 @@ func TestGetRun(t *testing.T) { run, err := api.GetRun(ctx, request) assert.NoError(t, err) - assert.Equal(t, 2, len(run.JobClusters)) + assert.Equal(t, 4, len(run.Tasks)) + assert.Equal(t, 3, len(run.JobClusters)) + assert.Equal(t, 4, len(run.JobParameters)) assert.Equal(t, "cluster1", run.JobClusters[0].JobClusterKey) assert.Equal(t, "cluster2", run.JobClusters[1].JobClusterKey) + assert.Equal(t, "cluster3", run.JobClusters[2].JobClusterKey) + assert.Equal(t, "key1", run.JobParameters[0].Name) + assert.Equal(t, "value1", run.JobParameters[0].Value) + assert.Equal(t, "key4", run.JobParameters[3].Name) + assert.Equal(t, "value4", run.JobParameters[3].Value) }) t.Run("run with two iterations pages", func(t *testing.T) { From 5a9ddae6d0925d9bb837c0ace3e32e50362faf1f Mon Sep 17 00:00:00 2001 From: Giorgi Kikolashvili <47174341+gkiko10@users.noreply.github.com> Date: Thu, 13 Feb 2025 15:20:04 +0100 Subject: [PATCH 17/54] [Internal] Update Jobs GetJob API to support paginated responses (#1133) ## What changes are proposed in this pull request? Introduces logic in extension for jobs GetJob call that paginates tasks and other arrays in the response. This change is necessary for SDK and API 2.2 compatibility. API 2.2 serves paginated responses as long as next_page_token field is present in the response. The pagination logic is not exposed to the customer. ## How is this tested? 
I enabled API 2.2 calls by modifying URL string /api/2.2/jobs/get in service/jobs/impl.go. Then I ran unit test from service/jobs/ext_api_test.go --- service/jobs/ext_api.go | 27 ++++ service/jobs/ext_api_test.go | 255 +++++++++++++++++++++++++++++++++++ 2 files changed, 282 insertions(+) diff --git a/service/jobs/ext_api.go b/service/jobs/ext_api.go index e7ea090c3..f834d6e40 100644 --- a/service/jobs/ext_api.go +++ b/service/jobs/ext_api.go @@ -36,3 +36,30 @@ func (a *JobsAPI) GetRun(ctx context.Context, request GetRunRequest) (*Run, erro return run, nil } + +// Get retrieves a job based on the provided request. +// It handles pagination if the job contains multiple tasks, job_clusters, job_parameters or environments. +func (a *JobsAPI) Get(ctx context.Context, request GetJobRequest) (*Job, error) { + job, err := a.jobsImpl.Get(ctx, request) + if err != nil { + return nil, err + } + + // jobs/get response includes next_page_token as long as there are more pages to fetch. + for job.NextPageToken != "" { + request.PageToken = job.NextPageToken + nextJob, err := a.jobsImpl.Get(ctx, request) + if err != nil { + return nil, err + } + + // Each new page of jobs/get response includes the next page of the tasks, job_clusters, job_parameters, and environments. + job.Settings.Tasks = append(job.Settings.Tasks, nextJob.Settings.Tasks...) + job.Settings.JobClusters = append(job.Settings.JobClusters, nextJob.Settings.JobClusters...) + job.Settings.Parameters = append(job.Settings.Parameters, nextJob.Settings.Parameters...) + job.Settings.Environments = append(job.Settings.Environments, nextJob.Settings.Environments...) 
+ job.NextPageToken = nextJob.NextPageToken + } + + return job, nil +} diff --git a/service/jobs/ext_api_test.go b/service/jobs/ext_api_test.go index acec84215..bf6a73c70 100644 --- a/service/jobs/ext_api_test.go +++ b/service/jobs/ext_api_test.go @@ -413,3 +413,258 @@ func TestGetRun(t *testing.T) { assert.EqualValues(t, 999, run.Tasks[0].RunId) }) } + +func TestGetJob(t *testing.T) { + ctx := context.Background() + + t.Run("job with no pagination", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=514594995218126", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "task1", + }, + { + TaskKey: "task2", + }, + { + TaskKey: "task3", + }, + { + TaskKey: "task4", + }, + }, + }, + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/get?job_id=514594995218126", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "task1", + }, + { + TaskKey: "task2", + }, + { + TaskKey: "task3", + }, + { + TaskKey: "task4", + }, + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + request := GetJobRequest{JobId: 514594995218126} + job, err := api.Get(ctx, request) + + assert.NoError(t, err) + assert.Equal(t, 4, len(job.Settings.Tasks)) + assert.EqualValues(t, "task1", job.Settings.Tasks[0].TaskKey) + assert.EqualValues(t, "task4", job.Settings.Tasks[3].TaskKey) + }) + + t.Run("job with multiple pages", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=514594995218126", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "task1", + }, + { + TaskKey: "task2", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + }, + Parameters: 
[]JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=514594995218126&page_token=token1", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "task3", + }, + { + TaskKey: "task4", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster3", + }, + { + JobClusterKey: "cluster4", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param3", + Default: "default3", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env2", + }, + }, + }, + NextPageToken: "token2", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=514594995218126&page_token=token2", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "task5", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env3", + }, + }, + }, + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/get?job_id=514594995218126", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "task1", + }, + { + TaskKey: "task2", + }, + { + TaskKey: "task3", + }, + { + TaskKey: "task4", + }, + { + TaskKey: "task5", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "cluster1", + }, + { + JobClusterKey: "cluster2", + }, + { + JobClusterKey: "cluster3", + }, + { + JobClusterKey: "cluster4", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + { + Name: "param3", + Default: "default3", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + { + EnvironmentKey: "env2", + }, + { + EnvironmentKey: "env3", + }, + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl 
:= &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + request := GetJobRequest{JobId: 514594995218126} + job, err := api.Get(ctx, request) + + assert.NoError(t, err) + assert.Equal(t, 5, len(job.Settings.Tasks)) + assert.Equal(t, 4, len(job.Settings.JobClusters)) + assert.Equal(t, 3, len(job.Settings.Parameters)) + assert.Equal(t, 3, len(job.Settings.Environments)) + assert.EqualValues(t, "task1", job.Settings.Tasks[0].TaskKey) + assert.EqualValues(t, "task4", job.Settings.Tasks[3].TaskKey) + assert.EqualValues(t, "task5", job.Settings.Tasks[4].TaskKey) + assert.EqualValues(t, "cluster3", job.Settings.JobClusters[2].JobClusterKey) + assert.EqualValues(t, "param3", job.Settings.Parameters[2].Name) + assert.EqualValues(t, "env3", job.Settings.Environments[2].EnvironmentKey) + }) +} From 515359fde79fcfba2dbe3024a4ed7dfd4fa42818 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Mon, 17 Feb 2025 10:00:47 +0100 Subject: [PATCH 18/54] [Internal] Enable Automated Tagging for SDK (#1148) ## What changes are proposed in this pull request? Enable Automated Tagging for SDK to support faster releases. ## How is this tested? Automated Tagging process has been already enabled in `terraform-provider-databricks` repository. 
--- .codegen.json | 2 +- .codegen/changelog_config.yml | 3 - .github/workflows/next-changelog.yml | 105 +++++ .github/workflows/tagging.yml | 52 +++ .package.json | 1 + NEXT_CHANGELOG.md | 16 + service/pkg.go | 4 +- tagging.py | 571 +++++++++++++++++++++++++++ 8 files changed, 748 insertions(+), 6 deletions(-) create mode 100644 .github/workflows/next-changelog.yml create mode 100644 .github/workflows/tagging.yml create mode 100644 .package.json create mode 100644 NEXT_CHANGELOG.md create mode 100644 tagging.py diff --git a/.codegen.json b/.codegen.json index 91b22e87c..9da4ed72d 100644 --- a/.codegen.json +++ b/.codegen.json @@ -1,6 +1,6 @@ { "mode": "go_v0", - "changelog_config": ".codegen/changelog_config.yml", + "api_changelog": true, "version": { "version/version.go": "const Version = \"$VERSION\"" }, diff --git a/.codegen/changelog_config.yml b/.codegen/changelog_config.yml index 318cbdeb5..c137c72bb 100644 --- a/.codegen/changelog_config.yml +++ b/.codegen/changelog_config.yml @@ -7,8 +7,5 @@ change_types: tag: "[Doc]" - message: Internal Changes tag: "[Internal]" - # Does not appear in the Changelog. Only for PR validation. - - message: Release - tag: "[Release]" # Default for messages without a tag - message: Other Changes \ No newline at end of file diff --git a/.github/workflows/next-changelog.yml b/.github/workflows/next-changelog.yml new file mode 100644 index 000000000..b9f1792f1 --- /dev/null +++ b/.github/workflows/next-changelog.yml @@ -0,0 +1,105 @@ +# Generated file. DO NOT EDIT. 
+name: Check for NEXT_CHANGELOG.md Changes + +on: + # Use pull_request_target to have access to GitHub API + pull_request_target: + +jobs: + check-next-changelog: + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + + steps: + - name: Checkout code + uses: actions/checkout@v3 + + - name: Fetch list of changed files + id: changed-files + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Use the GitHub API to fetch changed files + files=$(gh pr view ${{ github.event.pull_request.number }} --json files -q '.files[].path') + + # Sanitize to avoid code injection + sanitized_files=$(echo "$files" | sed 's/[^a-zA-Z0-9._/-]/_/g') + + # Store the sanitized list of files in a temporary file to avoid env variable issues + echo "$sanitized_files" > modified_files.txt + + - name: Fetch PR message + id: pr-message + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Use the GitHub API to fetch the PR message + pr_message=$(gh pr view ${{ github.event.pull_request.number }} --json body -q '.body') + + # Sanitize the PR message to avoid code injection, keeping the equal sign + sanitized_pr_message=$(echo "$pr_message" | sed 's/[^a-zA-Z0-9._/-=]/_/g') + + # Store the sanitized PR message + echo "$sanitized_pr_message" > pr_message.txt + + - name: Verify NEXT_CHANGELOG.md was modified or PR message contains NO_CHANGELOG=true + run: | + # Read the sanitized files and PR message from the temporary files + modified_files=$(cat modified_files.txt) + pr_message=$(cat pr_message.txt) + + # Check if NEXT_CHANGELOG.md exists in the list of changed files + echo "Changed files: $modified_files" + if ! echo "$modified_files" | grep -q "NEXT_CHANGELOG.md"; then + echo "NEXT_CHANGELOG.md not modified." + + # Check if PR message contains NO_CHANGELOG=true + if echo "$pr_message" | grep -q "NO_CHANGELOG=true"; then + echo "NO_CHANGELOG=true found in PR message. Skipping changelog check." 
+ exit 0 + else + echo "WARNING: file NEXT_CHANGELOG.md not changed. If this is expected, add NO_CHANGELOG=true to the PR message." + exit 1 + fi + fi + + - name: Comment on PR with instructions if needed + if: failure() # This step will only run if the previous step fails (i.e., if NEXT_CHANGELOG.md was not modified and NO_CHANGELOG=true was not in the PR message) + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Check if a comment exists with the instructions + previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \ + --jq '.[] | select(.body | startswith("")) | .id') + echo "Previous comment IDs: $previous_comment_ids" + + # If no previous comment exists, add one with instructions + if [ -z "$previous_comment_ids" ]; then + echo "Adding instructions comment." + gh pr comment ${{ github.event.pull_request.number }} --body \ + " + Please ensure that the NEXT_CHANGELOG.md file is updated with any relevant changes. + If this is not necessary for your PR, please include the following in your PR description: + NO_CHANGELOG=true + and rerun the job." + fi + + - name: Delete instructions comment on success + if: success() # This step will only run if the previous check passed (i.e., if NEXT_CHANGELOG.md was modified or NO_CHANGELOG=true is in the PR message) + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + run: | + # Check if there is a previous instructions comment + previous_comment_ids=$(gh api "repos/${{ github.repository }}/issues/${{ github.event.pull_request.number }}/comments" \ + --jq '.[] | select(.body | startswith("")) | .id') + + # If a comment exists, delete it + if [ -n "$previous_comment_ids" ]; then + echo "Deleting previous instructions comment." + for comment_id in $previous_comment_ids; do + gh api "repos/${{ github.repository }}/issues/comments/$comment_id" --method DELETE + done + else + echo "No instructions comment found to delete." 
+ fi diff --git a/.github/workflows/tagging.yml b/.github/workflows/tagging.yml new file mode 100644 index 000000000..558f2993a --- /dev/null +++ b/.github/workflows/tagging.yml @@ -0,0 +1,52 @@ +# Generated file. DO NOT EDIT. +name: tagging + +on: + workflow_dispatch: + # Enable for automatic tagging + #schedule: + # - cron: '0 0 * * TUE' + +# Ensure that only a single instance of the workflow is running at a time. +concurrency: + group: "tagging" + + +jobs: + tag: + environment: "release-is" + runs-on: + group: databricks-deco-testing-runner-group + labels: ubuntu-latest-deco + steps: + - name: Generate GitHub App Token + id: generate-token + uses: actions/create-github-app-token@v1 + with: + app-id: ${{ secrets.DECO_SDK_TAGGING_APP_ID }} + private-key: ${{ secrets.DECO_SDK_TAGGING_PRIVATE_KEY }} + + - name: Checkout repository + uses: actions/checkout@v4 + with: + fetch-depth: 0 + token: ${{ steps.generate-token.outputs.token }} + + #NOTE: email must be the GitHub App email or the commit will not be verified. 
+ - name: Set up Git configuration + run: | + git config user.name "Databricks SDK Release Bot" + git config user.email "DECO-SDK-Tagging[bot]@users.noreply.github.com" + + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install PyGithub + + - name: Run script + env: + GITHUB_TOKEN: ${{ steps.generate-token.outputs.token }} + GITHUB_REPOSITORY: ${{ github.repository }} + run: | + python tagging.py + diff --git a/.package.json b/.package.json new file mode 100644 index 000000000..9e26dfeeb --- /dev/null +++ b/.package.json @@ -0,0 +1 @@ +{} \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md new file mode 100644 index 000000000..9bb04b137 --- /dev/null +++ b/NEXT_CHANGELOG.md @@ -0,0 +1,16 @@ +# NEXT CHANGELOG + +## Release v0.59.0 + +### New Features and Improvements + +### Bug Fixes + +### Documentation + +### Internal Changes +* Introduce automated tagging ([#1148](https://github.com/databricks/databricks-sdk-go/pull/1148)). +* Update Jobs GetJob API to support paginated responses ([#1133](https://github.com/databricks/databricks-sdk-go/pull/1133)). +* Update Jobs GetRun API to support paginated responses ([#1132](https://github.com/databricks/databricks-sdk-go/pull/1132)). + +### API Changes diff --git a/service/pkg.go b/service/pkg.go index d42c20d64..c68cd1100 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -52,10 +52,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. -// // - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. // +// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. 
+// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. // // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. diff --git a/tagging.py b/tagging.py new file mode 100644 index 000000000..c57621fb4 --- /dev/null +++ b/tagging.py @@ -0,0 +1,571 @@ +#!/usr/bin/env python3 + +import os +import re +import argparse +from typing import Optional, List, Callable +from dataclasses import dataclass +import subprocess +import time +import json +from github import Github, Repository, InputGitTreeElement, InputGitAuthor +from datetime import datetime, timezone + +NEXT_CHANGELOG_FILE_NAME = "NEXT_CHANGELOG.md" +CHANGELOG_FILE_NAME = "CHANGELOG.md" +PACKAGE_FILE_NAME = ".package.json" +""" +This script tags the release of the SDKs using a combination of the GitHub API and Git commands. +It reads the local repository to determine necessary changes, updates changelogs, and creates tags. + +### How it Works: +- It does **not** modify the local repository directly. +- Instead of committing and pushing changes locally, it uses the **GitHub API** to create commits and tags. +""" + + +# GitHub does not support signing commits for GitHub Apps directly. +# This class replaces usages for git commands such as "git add", "git commit", and "git push". 
+@dataclass +class GitHubRepo: + def __init__(self, repo: Repository): + self.repo = repo + self.changed_files: list[InputGitTreeElement] = [] + self.ref = "heads/main" + head_ref = self.repo.get_git_ref(self.ref) + self.sha = head_ref.object.sha + + # Replaces "git add file" + def add_file(self, loc: str, content: str): + local_path = os.path.relpath(loc, os.getcwd()) + print(f"Adding file {local_path}") + blob = self.repo.create_git_blob(content=content, encoding="utf-8") + element = InputGitTreeElement(path=local_path, mode="100644", type="blob", sha=blob.sha) + self.changed_files.append(element) + + # Replaces "git commit && git push" + def commit_and_push(self, message: str): + head_ref = self.repo.get_git_ref(self.ref) + base_tree = self.repo.get_git_tree(sha=head_ref.object.sha) + new_tree = self.repo.create_git_tree(self.changed_files, base_tree) + parent_commit = self.repo.get_git_commit(head_ref.object.sha) + + new_commit = self.repo.create_git_commit( + message=message, tree=new_tree, parents=[parent_commit]) + # Update branch reference + head_ref.edit(new_commit.sha) + self.sha = new_commit.sha + + def reset(self, sha: Optional[str] = None): + self.changed_files = [] + if sha: + self.sha = sha + else: + head_ref = self.repo.get_git_ref(self.ref) + self.sha = head_ref.object.sha + + def tag(self, tag_name: str, tag_message: str): + # Create a tag pointing to the new commit + # The email MUST be the GitHub Apps email. + # Otherwise, the tag will not be verified. + tagger = InputGitAuthor( + name="Databricks SDK Release Bot", + email="DECO-SDK-Tagging[bot]@users.noreply.github.com") + + tag = self.repo.create_git_tag( + tag=tag_name, message=tag_message, object=self.sha, type="commit", tagger=tagger) + # Create a Git ref (the actual reference for the tag in the repo) + self.repo.create_git_ref(ref=f"refs/tags/{tag_name}", sha=tag.sha) + + +gh: Optional[GitHubRepo] = None + + +@dataclass +class Package: + """ + Represents a package in the repository. 
+ :name: The package name. + :path: The path to the package relative to the repository root. + """ + name: str + path: str + + +@dataclass +class TagInfo: + """ + Represents all changes on a release. + :package: package info. + :version: release version for the package. Format: v.. + :content: changes for the release, as they appear in the changelog. + + Example: + + ## Release v0.56.0 + + ### New Features and Improvements + * Feature + * Some improvement + + ### Bug Fixes + * Bug fix + + ### Documentation + * Doc Changes + + ### Internal Changes + * More Changes + + ### API Changes + * Add new Service + + """ + + package: Package + version: str + content: str + + def tag_name(self) -> str: + return f"{self.package.name}/v{self.version}" if self.package.name else f"v{self.version}" + + +def get_package_name(package_path: str) -> str: + """ + Returns the package name from the package path. + The name is found inside the .package.json file: + { + "package": "package_name" + } + """ + filepath = os.path.join(os.getcwd(), package_path, PACKAGE_FILE_NAME) + with open(filepath, 'r') as file: + content = json.load(file) + if "package" in content: + return content["package"] + # Legacy SDKs have no packages. + return "" + + +def update_version_references(tag_info: TagInfo) -> None: + """ + Updates the version of the package in code references. + Code references are defined in .package.json files. + """ + + # Load version patterns from '.package.json' file + package_file_path = os.path.join(os.getcwd(), tag_info.package.path, PACKAGE_FILE_NAME) + with open(package_file_path, 'r') as file: + package_file = json.load(file) + + version = package_file.get('version') + if not version: + print(f"Version not found in .package.json. 
Nothing to update.") + return + + # Update the versions + for filename, pattern in version.items(): + loc = os.path.join(os.getcwd(), tag_info.package.path, filename) + previous_version = re.sub(r'\$VERSION', r"\\d+\\.\\d+\\.\\d+", pattern) + new_version = re.sub(r'\$VERSION', tag_info.version, pattern) + + with open(loc, 'r') as file: + content = file.read() + + # Replace the version in the file content + updated_content = re.sub(previous_version, new_version, content) + + gh.add_file(loc, updated_content) + + +def clean_next_changelog(package_path: str) -> None: + """ + Cleans the "NEXT_CHANGELOG.md" file. It performs 2 operations: + * Increase the version to the next minor version. + * Remove release notes. Sections names are kept to + keep consistency in the section names between releases. + """ + + file_path = os.path.join(os.getcwd(), package_path, NEXT_CHANGELOG_FILE_NAME) + with open(file_path, 'r') as file: + content = file.read() + + # Remove content between ### sections + cleaned_content = re.sub(r'(### [^\n]+\n)(?:.*?\n?)*?(?=###|$)', r'\1', content) + # Ensure there is exactly one empty line before each section + cleaned_content = re.sub(r'(\n*)(###[^\n]+)', r'\n\n\2', cleaned_content) + # Find the version number + version_match = re.search(r'Release v(\d+)\.(\d+)\.(\d+)', cleaned_content) + if not version_match: + raise Exception("Version not found in the changelog") + major, minor, patch = map(int, version_match.groups()) + # Prepare next release version. + # When doing a PR, teams can adjust the version. + # By default, we increase a minor version, since minor versions releases + # are more common than patch or major version releases. 
+ minor += 1 + patch = 0 + new_version = f'Release v{major}.{minor}.{patch}' + cleaned_content = cleaned_content.replace(version_match.group(0), new_version) + + # Update file with cleaned content + gh.add_file(file_path, cleaned_content) + + +def get_previous_tag_info(package: Package) -> Optional[TagInfo]: + """ + Extracts the previous tag info from the "CHANGELOG.md" file. + Used for failure recovery purposes. + """ + changelog_path = os.path.join(os.getcwd(), package.path, CHANGELOG_FILE_NAME) + + with open(changelog_path, 'r') as f: + changelog = f.read() + + # Extract the latest release section using regex + match = re.search(r"## (\[Release\] )?Release v[\d\.]+.*?(?=\n## (\[Release\] )?Release v|\Z)", + changelog, re.S) + + # E.g., for new packages. + if not match: + return None + + latest_release = match.group(0) + version_match = re.search(r'## (\[Release\] )?Release v(\d+\.\d+\.\d+)', latest_release) + + if not version_match: + raise Exception("Version not found in the changelog") + + return TagInfo(package=package, version=version_match.group(2), content=latest_release) + + +def get_next_tag_info(package: Package) -> Optional[TagInfo]: + """ + Extracts the changes from the "NEXT_CHANGELOG.md" file. + The result is already processed. + """ + next_changelog_path = os.path.join(os.getcwd(), package.path, NEXT_CHANGELOG_FILE_NAME) + # Read NEXT_CHANGELOG.md + with open(next_changelog_path, 'r') as f: + next_changelog = f.read() + + # Remove "# NEXT CHANGELOG" line + next_changelog = re.sub(r'^# NEXT CHANGELOG(\n+)', '', next_changelog, flags=re.MULTILINE) + + # Remove empty sections + next_changelog = re.sub(r'###[^\n]+\n+(?=##|\Z)', '', next_changelog) + # Ensure there is exactly one empty line before each section + next_changelog = re.sub(r'(\n*)(###[^\n]+)', r'\n\n\2', next_changelog) + + if not re.search(r'###', next_changelog): + print("All sections are empty. 
No changes will be made to the changelog.") + return None + + version_match = re.search(r'## Release v(\d+\.\d+\.\d+)', next_changelog) + + if not version_match: + raise Exception("Version not found in the changelog") + + return TagInfo(package=package, version=version_match.group(1), content=next_changelog) + + +def write_changelog(tag_info: TagInfo) -> None: + """ + Updates the changelog with a new tag info. + """ + changelog_path = os.path.join(os.getcwd(), tag_info.package.path, CHANGELOG_FILE_NAME) + with open(changelog_path, 'r') as f: + changelog = f.read() + updated_changelog = re.sub(r'(# Version changelog\n\n)', f'\\1{tag_info.content.strip()}\n\n\n', + changelog) + gh.add_file(changelog_path, updated_changelog) + + +def process_package(package: Package) -> TagInfo: + """ + Processes a package + """ + # Prepare tag_info from NEXT_CHANGELOG.md + print(f"Processing package {package.name}") + tag_info = get_next_tag_info(package) + + # If there are no updates, skip. + if tag_info is None: + return + + write_changelog(tag_info) + clean_next_changelog(package.path) + update_version_references(tag_info) + return tag_info + + +def find_packages() -> List[Package]: + """ + Returns all directories which contains a ".package.json" file. + """ + paths = _find_directories_with_file(PACKAGE_FILE_NAME) + return [Package(name=get_package_name(path), path=path) for path in paths] + + +def _find_directories_with_file(target_file: str) -> List[str]: + root_path = os.getcwd() + matching_directories = [] + + for dirpath, _, filenames in os.walk(root_path): + if target_file in filenames: + path = os.path.relpath(dirpath, root_path) + # If the path is the root directory (e.g., SDK V0), set it to an empty string. + if path == ".": + path = "" + matching_directories.append(path) + + return matching_directories + + +def is_tag_applied(tag: TagInfo) -> bool: + """ + Returns whether a tag is already applied in the repository. + + :param tag: The tag to check. 
+ :return: True if the tag is applied, False otherwise. + :raises Exception: If the git command fails. + """ + try: + # Check if the specific tag exists + result = subprocess.check_output( + ['git', 'tag', '--list', tag.tag_name()], stderr=subprocess.PIPE, text=True) + return result.strip() == tag.tag_name() + except subprocess.CalledProcessError as e: + # Raise a exception for git command errors + raise Exception(f"Git command failed: {e.stderr.strip() or e}") from e + + +def find_last_tags() -> List[TagInfo]: + """ + Finds the last tags for each package. + + Returns a list of TagInfo objects for each package with a non-None changelog. + """ + packages = find_packages() + + return [ + info for info in (get_previous_tag_info(package) for package in packages) + if info is not None + ] + + +def find_pending_tags() -> List[TagInfo]: + """ + Finds all tags that are pending to be applied. + """ + tag_infos = find_last_tags() + return [tag for tag in tag_infos if not is_tag_applied(tag)] + + +def generate_commit_message(tag_infos: List[TagInfo]) -> str: + """ + Generates a commit message for the release. 
+ """ + if not tag_infos: + raise Exception("No tag infos provided to generate commit message") + + info = tag_infos[0] + # Legacy mode for SDKs without per service packaging + if not info.package.name: + if len(tag_infos) > 1: + raise Exception("Multiple packages found in legacy mode") + return f"[Release] Release v{info.version}\n\n{info.content}" + + # Sort tag_infos by package name for consistency + tag_infos.sort(key=lambda info: info.package.name) + return 'Release\n\n' + '\n\n'.join(f"## {info.package.name}/v{info.version}\n\n{info.content}" + for info in tag_infos) + + +def push_changes(tag_infos: List[TagInfo]) -> None: + """Pushes changes to the remote repository after handling possible merge conflicts.""" + + commit_message = generate_commit_message(tag_infos) + + # Create the release metadata file + file_name = os.path.join(os.getcwd(), ".release_metadata.json") + metadata = {"timestamp": datetime.now(tz=timezone.utc).strftime("%Y-%m-%d %H:%M:%S%z")} + content = json.dumps(metadata, indent=4) + gh.add_file(file_name, content) + + gh.commit_and_push(commit_message) + + +def reset_repository(hash: Optional[str] = None) -> None: + """ + Reset git to the specified commit. Defaults to HEAD. + + :param hash: The commit hash to reset to. If None, it resets to HEAD. + """ + # Fetch the latest changes from the remote repository + subprocess.run(['git', 'fetch']) + + # Determine the commit hash (default to origin/main if none is provided) + commit_hash = hash or 'origin/main' + + # Reset in memory changed files and the commit hash + gh.reset(hash) + + # Construct the Git reset command + command = ['git', 'reset', '--hard', commit_hash] + + # Execute the git reset command + subprocess.run(command, check=True) + + +def retry_function(func: Callable[[], List[TagInfo]], + cleanup: Callable[[], None], + max_attempts: int = 5, + delay: int = 5) -> List[TagInfo]: + """ + Calls a function call up to `max_attempts` times if an exception occurs. 
+ + :param func: The function to call. + :param cleanup: Cleanup function to run in between retries. + :param max_attempts: The maximum number of retries. + :param delay: The delay between retries in seconds. + :return: The return value of the function. Re-raises the last exception if all attempts fail. + """ + attempts = 0 + while attempts <= max_attempts: + try: + return func() # Call the function + except Exception as e: + attempts += 1 + print(f"Attempt {attempts} failed: {e}") + if attempts < max_attempts: + time.sleep(delay) # Wait before retrying + cleanup() + else: + print("All retry attempts failed.") + raise e # Re-raise the exception after max retries + + +def update_changelogs(packages: List[Package]) -> List[TagInfo]: + """ + Updates changelogs and pushes the commits. + """ + tag_infos = [ + info for info in (process_package(package) for package in packages) if info is not None + ] + # If any package was changed, push the changes. + if tag_infos: + push_changes(tag_infos) + return tag_infos + + +def push_tags(tag_infos: List[TagInfo]) -> None: + """ + Creates and pushes tags to the repository. + """ + for tag_info in tag_infos: + gh.tag(tag_info.tag_name(), tag_info.content) + + +def run_command(command: List[str]) -> str: + """ + Runs a command and returns its output. + """ + output = subprocess.check_output(command) + print(f'Running command: {" ".join(command)}') + return output.decode() + + +def pull_last_release_commit() -> None: + """ + Reset the repository to the last release. + Uses the commit of the last change to .release_metadata.json, since it's only updated on releases. 
+ """ + commit_hash = subprocess.check_output( + ['git', 'log', '-n', '1', '--format=%H', '--', '.release_metadata.json'], + text=True).strip() + + # If no commit is found, raise an exception + if not commit_hash: + raise ValueError("No commit found for .release_metadata.json") + + # Reset the repository to the commit + reset_repository(commit_hash) + + +def get_package_from_args() -> Optional[str]: + """ + Retrieves an optional package name from the command line: + python3 ./tagging.py --package + """ + parser = argparse.ArgumentParser(description='Update changelogs and tag the release.') + parser.add_argument('--package', '-p', type=str, help='Tag a single package') + args = parser.parse_args() + return args.package + + +def init_github(): + token = os.environ['GITHUB_TOKEN'] + repo_name = os.environ['GITHUB_REPOSITORY'] + g = Github(token) + repo = g.get_repo(repo_name) + global gh + gh = GitHubRepo(repo) + + +def process(): + """ + Main entry point for the tagging process. + + The tagging process consists of multiple steps: + * For each package, update the corresponding CHANGELOG.md file based on the contents of NEXT_CHANGELOG.md file + * If any package has been updated, commit and push the changes. + * Apply and push the new tags matching the version. + + If a specific package is provided as a parameter, only that package will be tagged. + + If any tags are pending from an earlier process, it will skip updating the CHANGELOG.md files and only apply the tags. + """ + + package_name = get_package_from_args() + pending_tags = find_pending_tags() + + # pending_tags is non-empty only when the tagging process previously failed or was interrupted. + # We must complete the interrupted tagging process before starting a new one to avoid inconsistent states and missing changelog entries. + # Therefore, we don't support specifying the package until the previously started process has been successfully completed. 
+ if pending_tags and package_name: + pending_packages = [tag.package.name for tag in pending_tags] + raise Exception( + f"Cannot release package {package_name}. Pending release for {pending_packages}") + + if pending_tags: + print("Found pending tags from previous executions, entering recovery mode.") + pull_last_release_commit() + push_tags(pending_tags) + return + + packages = find_packages() + # If a package is specified as an argument, only process that package + if package_name: + packages = [package for package in packages if package.name == package_name] + + pending_tags = retry_function( + func=lambda: update_changelogs(packages), cleanup=reset_repository) + push_tags(pending_tags) + + +def validate_git_root(): + """ + Validate that the script is run from the root of the repository. + """ + repo_root = subprocess.check_output(["git", "rev-parse", + "--show-toplevel"]).strip().decode("utf-8") + current_dir = subprocess.check_output(["pwd"]).strip().decode("utf-8") + if repo_root != current_dir: + raise Exception("Please run this script from the root of the repository.") + + +if __name__ == "__main__": + validate_git_root() + init_github() + process() From 8f32c2e630148b76ca1ff5f52c2e402483f5441d Mon Sep 17 00:00:00 2001 From: Giorgi Kikolashvili <47174341+gkiko10@users.noreply.github.com> Date: Mon, 24 Feb 2025 16:35:40 +0100 Subject: [PATCH 19/54] [Internal] Update Jobs `list` function to support paginated responses (#1150) ## What changes are proposed in this pull request? Introduces logic in the extension for jobs ListJobs call. The extended logic accounts for the new response format of API 2.2. API 2.1 format returns all tasks and job_cluster for each job in the jobs list. API 2.2 format truncates tasks and job_cluster to 100 elements. The extended ListJobs logic calls GetJob for each job in the list to populate the full list of tasks and job_clusters. 
I added logic that reads jobs from the list and produces custom iterator struct `expandedJobsIterator` that is supposed to mimic python generators. The goal is to only read necessary elements from the API endpoint and not more. ## How is this tested? Unit tests and manual tests. Manual tests were done in two modes: using API 2.2 and using API 2.1. So this code is compatible with both API versions. --- NEXT_CHANGELOG.md | 1 + service/jobs/ext_api.go | 58 +++- service/jobs/ext_api_test.go | 613 +++++++++++++++++++++++++++++++++++ 3 files changed, 671 insertions(+), 1 deletion(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 9bb04b137..d3ac93f99 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -9,6 +9,7 @@ ### Documentation ### Internal Changes +* Update Jobs ListJobs API to support paginated responses ([#1150](https://github.com/databricks/databricks-sdk-go/pull/1150)) * Introduce automated tagging ([#1148](https://github.com/databricks/databricks-sdk-go/pull/1148)). * Update Jobs GetJob API to support paginated responses ([#1133](https://github.com/databricks/databricks-sdk-go/pull/1133)). * Update Jobs GetRun API to support paginated responses ([#1132](https://github.com/databricks/databricks-sdk-go/pull/1132)). diff --git a/service/jobs/ext_api.go b/service/jobs/ext_api.go index f834d6e40..eb96b6ca0 100644 --- a/service/jobs/ext_api.go +++ b/service/jobs/ext_api.go @@ -1,6 +1,62 @@ package jobs -import "context" +import ( + "context" + + "github.com/databricks/databricks-sdk-go/listing" +) + +// List fetches a list of jobs. +// If expand_tasks is true, the response will include the full list of tasks and job_clusters for each job. +// This function handles pagination two ways: paginates all the jobs in the list and paginates all the tasks and job_clusters for each job. 
+func (a *JobsAPI) List(ctx context.Context, request ListJobsRequest) listing.Iterator[BaseJob] { + // Fetch jobs with limited elements in top level arrays + jobsList := a.jobsImpl.List(ctx, request) + + if !request.ExpandTasks { + return jobsList + } + + return &expandedJobsIterator{ + originalIterator: jobsList, + service: a, + } +} + +// expandedJobsIterator is a custom iterator that for each job calls job/get in order to fetch full list of tasks and job_clusters. +type expandedJobsIterator struct { + originalIterator listing.Iterator[BaseJob] + service *JobsAPI +} + +func (e *expandedJobsIterator) HasNext(ctx context.Context) bool { + return e.originalIterator.HasNext(ctx) +} + +func (e *expandedJobsIterator) Next(ctx context.Context) (BaseJob, error) { + job, err := e.originalIterator.Next(ctx) + if err != nil { + return BaseJob{}, err + } + if !job.HasMore { + return job, nil + } + + // Fully fetch all top level arrays for the job + getJobRequest := GetJobRequest{JobId: job.JobId} + fullJob, err := e.service.Get(ctx, getJobRequest) + if err != nil { + return BaseJob{}, err + } + + job.Settings.Tasks = fullJob.Settings.Tasks + job.Settings.JobClusters = fullJob.Settings.JobClusters + job.Settings.Parameters = fullJob.Settings.Parameters + job.Settings.Environments = fullJob.Settings.Environments + job.HasMore = false + + return job, nil +} // GetRun retrieves a run based on the provided request. // It handles pagination if the run contains multiple iterations or tasks. 
diff --git a/service/jobs/ext_api_test.go b/service/jobs/ext_api_test.go index bf6a73c70..a7af26668 100644 --- a/service/jobs/ext_api_test.go +++ b/service/jobs/ext_api_test.go @@ -668,3 +668,616 @@ func TestGetJob(t *testing.T) { assert.EqualValues(t, "env3", job.Settings.Environments[2].EnvironmentKey) }) } + +func TestListJobs(t *testing.T) { + ctx := context.Background() + + t.Run("jobs list with no task expansion", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.2/jobs/list?", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 100, + Settings: &JobSettings{ + Name: "job_100", + }, + }, + { + JobId: 200, + Settings: &JobSettings{ + Name: "job_200", + }, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/list?page_token=token1", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 300, + Settings: &JobSettings{ + Name: "job_300", + }, + }, + { + JobId: 400, + Settings: &JobSettings{ + Name: "job_400", + }, + }, + }, + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/list?", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 100, + Settings: &JobSettings{ + Name: "job_100", + }, + }, + { + JobId: 200, + Settings: &JobSettings{ + Name: "job_200", + }, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + ReuseRequest: true, + Resource: "/api/2.1/jobs/list?page_token=token1", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 300, + Settings: &JobSettings{ + Name: "job_300", + }, + }, + { + JobId: 400, + Settings: &JobSettings{ + Name: "job_400", + }, + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + jobsList := api.List(ctx, ListJobsRequest{}) + var allJobs []BaseJob + for jobsList.HasNext(ctx) { + job, err := jobsList.Next(ctx) + 
assert.NoError(t, err) + assert.NotEmpty(t, job.JobId) + allJobs = append(allJobs, job) + } + + assert.EqualValues(t, len(allJobs), 4) + assert.EqualValues(t, allJobs[0].JobId, 100) + assert.EqualValues(t, allJobs[2].JobId, 300) + assert.EqualValues(t, allJobs[3].JobId, 400) + assert.EqualValues(t, allJobs[3].Settings.Name, "job_400") + }) + + t.Run("jobs list with task expansion", func(t *testing.T) { + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.2/jobs/list?expand_tasks=true", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 100, + Settings: &JobSettings{ + Name: "job_100", + Tasks: []Task{ + { + TaskKey: "job100_task1", + }, + { + TaskKey: "job100_task2", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "job100_cluster1", + }, + { + JobClusterKey: "job100_cluster2", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + }, + }, + HasMore: true, + }, + { + JobId: 200, + Settings: &JobSettings{ + Name: "job_200", + Tasks: []Task{ + { + TaskKey: "job200_task1", + }, + { + TaskKey: "job200_task2", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "job200_cluster1", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + }, + }, + HasMore: true, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/list?expand_tasks=true&page_token=token1", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 300, + Settings: &JobSettings{ + Name: "job_300", + Tasks: []Task{ + { + TaskKey: "job300_task1", + }, + }, + }, + }, + { + JobId: 400, + Settings: &JobSettings{ + Name: "job_400", + Tasks: []Task{ + { + TaskKey: "job400_task3", + }, + { + TaskKey: "job400_task1", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param1", + Default: 
"default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + }, + HasMore: true, + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=100", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "job100_task1", + }, + { + TaskKey: "job100_task2", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "job100_cluster1", + }, + { + JobClusterKey: "job100_cluster2", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=100&page_token=token1", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "job100_task3", + }, + { + TaskKey: "job100_task4", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "job100_cluster3", + }, + { + JobClusterKey: "job100_cluster4", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param3", + Default: "default3", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env2", + }, + }, + }, + NextPageToken: "token2", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=100&page_token=token2", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "job100_task5", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env3", + }, + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=200", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "job200_task1", + }, + { + TaskKey: "job200_task2", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "job200_cluster1", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + Resource: 
"/api/2.2/jobs/get?job_id=200&page_token=token1", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "job200_task3", + }, + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=400", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "job400_task1", + }, + { + TaskKey: "job400_task2", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/get?job_id=400&page_token=token1", + Response: Job{ + Settings: &JobSettings{ + Tasks: []Task{ + { + TaskKey: "job400_task3", + }, + { + TaskKey: "job400_task4", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param3", + Default: "default3", + }, + { + Name: "param4", + Default: "default4", + }, + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/list?expand_tasks=true", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 100, + Settings: &JobSettings{ + Name: "job_100", + Tasks: []Task{ + { + TaskKey: "job100_task1", + }, + { + TaskKey: "job100_task2", + }, + { + TaskKey: "job100_task3", + }, + { + TaskKey: "job100_task4", + }, + { + TaskKey: "job100_task5", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "job100_cluster1", + }, + { + JobClusterKey: "job100_cluster2", + }, + { + JobClusterKey: "job100_cluster3", + }, + { + JobClusterKey: "job100_cluster4", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + { + Name: "param3", + Default: "default3", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + { + EnvironmentKey: "env2", + }, + { + EnvironmentKey: "env3", + }, + }, + }, + }, + { + JobId: 200, + Settings: &JobSettings{ + Name: "job_200", + Tasks: []Task{ + { + TaskKey: "job200_task1", + 
}, + { + TaskKey: "job200_task2", + }, + { + TaskKey: "job200_task3", + }, + }, + JobClusters: []JobCluster{ + { + JobClusterKey: "job200_cluster1", + }, + }, + Environments: []JobEnvironment{ + { + EnvironmentKey: "env1", + }, + }, + }, + }, + }, + NextPageToken: "token1", + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/list?expand_tasks=true&page_token=token1", + Response: ListJobsResponse{ + Jobs: []BaseJob{ + { + JobId: 300, + Settings: &JobSettings{ + Name: "job_300", + Tasks: []Task{ + { + TaskKey: "job300_task1", + }, + }, + }, + }, + { + JobId: 400, + Settings: &JobSettings{ + Name: "job_400", + Tasks: []Task{ + { + TaskKey: "job400_task1", + }, + { + TaskKey: "job400_task2", + }, + { + TaskKey: "job400_task4", + }, + { + TaskKey: "job400_task5", + }, + }, + Parameters: []JobParameterDefinition{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + { + Name: "param3", + Default: "default3", + }, + { + Name: "param4", + Default: "default4", + }, + }, + }, + }, + }, + }, + }, + } + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + jobsList := api.List(ctx, ListJobsRequest{ExpandTasks: true}) + var allJobs []BaseJob + for jobsList.HasNext(ctx) { + job, err := jobsList.Next(ctx) + assert.NoError(t, err) + assert.NotEmpty(t, job.JobId) + assert.Empty(t, job.HasMore) + allJobs = append(allJobs, job) + } + + assert.Equal(t, 4, len(allJobs)) + assert.Equal(t, 5, len(allJobs[0].Settings.Tasks)) + assert.Equal(t, 4, len(allJobs[0].Settings.JobClusters)) + assert.Equal(t, 3, len(allJobs[0].Settings.Parameters)) + assert.Equal(t, 3, len(allJobs[0].Settings.Environments)) + assert.Equal(t, 3, len(allJobs[1].Settings.Tasks)) + assert.Equal(t, 1, len(allJobs[1].Settings.JobClusters)) + assert.Empty(t, allJobs[1].Settings.Parameters) + assert.Equal(t, 1, len(allJobs[1].Settings.Environments)) + 
assert.Equal(t, 1, len(allJobs[2].Settings.Tasks)) + assert.Empty(t, allJobs[2].Settings.JobClusters) + assert.Empty(t, allJobs[2].Settings.Parameters) + assert.Empty(t, allJobs[2].Settings.Environments) + assert.EqualValues(t, 100, allJobs[0].JobId) + assert.EqualValues(t, 300, allJobs[2].JobId) + assert.EqualValues(t, 400, allJobs[3].JobId) + assert.EqualValues(t, "job_400", allJobs[3].Settings.Name) + }) +} From c72cd2d821345e0be491fb7700f3ec513811dc31 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Tue, 25 Feb 2025 15:19:29 +0100 Subject: [PATCH 20/54] [Fix] Add support for all error details types and fix potential unmarshalling error (#1153) ## What changes are proposed in this pull request? This PR refactors how `APIError` processes error details to fully support the Databricks Error specification. In particular, it provides a clear mechanism to access known error details types while still enabling users to access unknown error details. The core of the PR is the addition of a new `ErrorDetails` type which operates as the union of all error details type. It implements the fact that an error response should only contain at most _one instance of each type_. The code is purposely structured to hide implementation details by (i) exposing `ErrorDetails` via a function, and (ii) separating the exported error details types from their unmarshalling logic. ## How is this tested? 100% unit tests coverage of new code + slight refactor of the test suite. --- NEXT_CHANGELOG.md | 4 + apierr/details.go | 399 +++++++++++++++++++++++++++++++++++ apierr/errors.go | 72 +++++-- apierr/errors_test.go | 478 +++++++++++++++++++++++++++++++++++------- 4 files changed, 859 insertions(+), 94 deletions(-) create mode 100644 apierr/details.go diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index d3ac93f99..132bddd00 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -6,9 +6,13 @@ ### Bug Fixes +* Fix unlikely issue due to conflicting error details in `APIError`. 
+ ### Documentation ### Internal Changes + +* Refactor `APIError` to expose different types of error details ([#1153](https://github.com/databricks/databricks-sdk-go/pull/1153)). * Update Jobs ListJobs API to support paginated responses ([#1150](https://github.com/databricks/databricks-sdk-go/pull/1150)) * Introduce automated tagging ([#1148](https://github.com/databricks/databricks-sdk-go/pull/1148)). * Update Jobs GetJob API to support paginated responses ([#1133](https://github.com/databricks/databricks-sdk-go/pull/1133)). diff --git a/apierr/details.go b/apierr/details.go new file mode 100644 index 000000000..1d7d5f1ab --- /dev/null +++ b/apierr/details.go @@ -0,0 +1,399 @@ +package apierr + +import ( + "encoding/json" + "time" +) + +// ErrorDetails contains the error details of an API error. It is the union of +// known error details types and unknown details. +type ErrorDetails struct { + ErrorInfo *ErrorInfo + RequestInfo *RequestInfo + RetryInfo *RetryInfo + DebugInfo *DebugInfo + QuotaFailure *QuotaFailure + PreconditionFailure *PreconditionFailure + BadRequest *BadRequest + ResourceInfo *ResourceInfo + Help *Help + + // UnknownDetails contains error details that cannot be unmarshalled into + // one of the known types above. + UnknownDetails []any +} + +// ErrorInfo describes the cause of the error with structured details. +type ErrorInfo struct { + // The reason of the error. This is a constant value that identifies the + // proximate cause of the error. + Reason string + + // The logical grouping to which the "reason" belongs. + Domain string + + // Additional structured details about this error. + Metadata map[string]string +} + +// RequestInfo Contains metadata about the request that clients can attach when +// filing a bug or providing other forms of feedback. +type RequestInfo struct { + // An opaque string that should only be interpreted by the service that + // generated it. 
For example, it can be used to identify requests in the + // service's logs. + RequestID string + + // Any data that was used to serve this request. For example, an encrypted + // stack trace that can be sent back to the service provider for debugging. + ServingData string +} + +// RetryInfo describes when the clients can retry a failed request. Clients +// could ignore the recommendation here or retry when this information is +// missing from error responses. +// +// It's always recommended that clients should use exponential backoff when +// retrying. +// +// Clients should wait until `retry_delay` amount of time has passed since +// receiving the error response before retrying. If retrying requests also +// fail, clients should use an exponential backoff scheme to gradually increase +// the delay between retries based on `retry_delay`, until either a maximum +// number of retries have been reached or a maximum retry delay cap has been +// reached. +type RetryInfo struct { + // Clients should wait at least this long between retrying the same request. + RetryDelay time.Duration +} + +// Describes additional debugging info. +type DebugInfo struct { + // The stack trace entries indicating where the error occurred. + StackEntries []string + + // Additional debugging information provided by the server. + Detail string +} + +// Describes how a quota check failed. +// +// For example if a daily limit was exceeded for the calling project, +// a service could respond with a QuotaFailure detail containing the project +// id and the description of the quota limit that was exceeded. If the +// calling project hasn't enabled the service in the developer console, then +// a service could respond with the project id and set `service_disabled` +// to true. +// +// Also see RetryInfo and Help types for other details about handling a +// quota failure. +type QuotaFailure struct { + // Describes all quota violations. 
+ Violations []QuotaFailureViolation +} + +type QuotaFailureViolation struct { + // The subject on which the quota check failed. + Subject string + + // A description of how the quota check failed. Clients can use this + // description to find more about the quota configuration in the service's + // public documentation, or find the relevant quota limit to adjust through + // developer console. + // + // For example: "Service disabled" or "Daily Limit for read operations + // exceeded". + Description string +} + +// Describes what preconditions have failed. +type PreconditionFailure struct { + // Describes all precondition violations. + Violations []PreconditionFailureViolation +} + +type PreconditionFailureViolation struct { + // The type of PreconditionFailure. + Type string + + // The subject, relative to the type, that failed. + Subject string + + // A description of how the precondition failed. Developers can use this + // description to understand how to fix the failure. + // + // For example: "Terms of service not accepted". + Description string +} + +// Describes violations in a client request. This error type focuses on the +// syntactic aspects of the request. +type BadRequest struct { + FieldViolations []BadRequestFieldViolation +} + +type BadRequestFieldViolation struct { + // A path leading to a field in the request body. + Field string + + // A description of why the request element is bad. + Description string +} + +// Describes the resource that is being accessed. +type ResourceInfo struct { + // A name for the type of resource being accessed. + ResourceType string + + // The name of the resource being accessed. + ResourceName string + + // The owner of the resource (optional). + Owner string + + // Describes what error is encountered when accessing this resource. + Description string +} + +// Provides links to documentation or for performing an out of band action. 
+// +// For example, if a quota check failed with an error indicating the calling +// project hasn't enabled the accessed service, this can contain a URL pointing +// directly to the right place in the developer console to flip the bit. +type Help struct { + // URL(s) pointing to additional information on handling the current error. + Links []HelpLink +} + +type HelpLink struct { + // Describes what the link offers. + Description string + + // The URL of the link. + URL string +} + +const ( + errorInfoType string = "type.googleapis.com/google.rpc.ErrorInfo" + requestInfoType string = "type.googleapis.com/google.rpc.RequestInfo" + retryInfoType string = "type.googleapis.com/google.rpc.RetryInfo" + debugInfoType string = "type.googleapis.com/google.rpc.DebugInfo" + quotaFailureType string = "type.googleapis.com/google.rpc.QuotaFailure" + preconditionFailureType string = "type.googleapis.com/google.rpc.PreconditionFailure" + badRequestType string = "type.googleapis.com/google.rpc.BadRequest" + resourceInfoType string = "type.googleapis.com/google.rpc.ResourceInfo" + helpType string = "type.googleapis.com/google.rpc.Help" +) + +// errorInfoPb is the wire-format representation of ErrorInfo. It is used +// internally to unmarshal ErrorInfo from JSON. +type errorInfoPb struct { + Reason string `json:"reason"` + Domain string `json:"domain"` + Metadata map[string]string `json:"metadata"` +} + +// requestInfoPb is the wire-format representation of RequestInfo. It is used +// internally to unmarshal RequestInfo from JSON. +type requestInfoPb struct { + RequestID string `json:"request_id"` + ServingData string `json:"serving_data"` +} + +// retryInfoPb is the wire-format representation of RetryInfo. It is used +// internally to unmarshal RetryInfo from JSON. 
+type retryInfoPb struct { + RetryDelay durationPb `json:"retry_delay"` +} + +type durationPb struct { + Seconds int64 `json:"seconds"` + Nanos int64 `json:"nanos"` +} + +// debugInfoPb is the wire-format representation of DebugInfo. It is used +// internally to unmarshal DebugInfo from JSON. +type debugInfoPb struct { + StackEntries []string `json:"stack_entries"` + Detail string `json:"detail"` +} + +// quotaFailurePb is the wire-format representation of QuotaFailure. It is used +// internally to unmarshal QuotaFailure from JSON. +type quotaFailurePb struct { + Violations []quotaFailureViolationPb `json:"violations"` +} + +type quotaFailureViolationPb struct { + Subject string `json:"subject"` + Description string `json:"description"` +} + +// preconditionFailurePb is the wire-format representation of PreconditionFailure. +// It is used internally to unmarshal PreconditionFailure from JSON. +type preconditionFailurePb struct { + Violations []preconditionFailureViolationPb `json:"violations"` +} + +type preconditionFailureViolationPb struct { + Type string `json:"type"` + Subject string `json:"subject"` + Description string `json:"description"` +} + +// badRequestPb is the wire-format representation of BadRequest. It is used +// internally to unmarshal BadRequest from JSON. +type badRequestPb struct { + FieldViolations []badRequestFieldViolationPb `json:"field_violations"` +} + +type badRequestFieldViolationPb struct { + Field string `json:"field"` + Description string `json:"description"` +} + +// resourceInfoPb is the wire-format representation of ResourceInfo. It is used +// internally to unmarshal ResourceInfo from JSON. +type resourceInfoPb struct { + ResourceType string `json:"resource_type"` + ResourceName string `json:"resource_name"` + Owner string `json:"owner"` + Description string `json:"description"` +} + +// helpPb is the wire-format representation of Help. It is used internally to +// unmarshal Help from JSON. 
+type helpPb struct { + Links []helpLinkPb `json:"links"` +} + +type helpLinkPb struct { + Description string `json:"description"` + URL string `json:"url"` +} + +func parseErrorDetails(details []any) ErrorDetails { + var ed ErrorDetails + for _, d := range details { + switch d := d.(type) { + case ErrorInfo: + ed.ErrorInfo = &d + case RequestInfo: + ed.RequestInfo = &d + case RetryInfo: + ed.RetryInfo = &d + case DebugInfo: + ed.DebugInfo = &d + case QuotaFailure: + ed.QuotaFailure = &d + case PreconditionFailure: + ed.PreconditionFailure = &d + case BadRequest: + ed.BadRequest = &d + case ResourceInfo: + ed.ResourceInfo = &d + case Help: + ed.Help = &d + default: + ed.UnknownDetails = append(ed.UnknownDetails, d) + } + } + return ed +} + +// unmarshalDetails attempts to unmarshal the given slice of bytes into a known +// error details type. It works as follows: +// +// - If the message is a known type, it unmarshals the message into that type. +// - If the message is not a known type, it returns the results of calling +// [json.Unmarshal] on the raw message. +// - If [json.Unmarshal] fails, it returns the input as is. 
+func unmarshalDetails(d []byte) any { + var a any + if err := json.Unmarshal(d, &a); err != nil { + return d // not a valid JSON message + } + m, ok := a.(map[string]any) + if !ok { + return a // not a JSON object + } + t, ok := m["@type"].(string) + if !ok { + return a // JSON object with no @type field + } + + switch t { + case errorInfoType: + var pb errorInfoPb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + return ErrorInfo(pb) + case requestInfoType: + var pb requestInfoPb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + return RequestInfo(pb) + case retryInfoType: + var pb retryInfoPb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + return RetryInfo{RetryDelay: time.Duration(pb.RetryDelay.Seconds)*time.Second + time.Duration(pb.RetryDelay.Nanos)*time.Nanosecond} + case debugInfoType: + var pb debugInfoPb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + return DebugInfo(pb) + case quotaFailureType: + var pb quotaFailurePb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + qf := QuotaFailure{} + for _, v := range pb.Violations { + qf.Violations = append(qf.Violations, QuotaFailureViolation(v)) + } + return qf + case preconditionFailureType: + var pb preconditionFailurePb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + pf := PreconditionFailure{} + for _, v := range pb.Violations { + pf.Violations = append(pf.Violations, PreconditionFailureViolation(v)) + } + return pf + case badRequestType: + var pb badRequestPb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + br := BadRequest{} + for _, v := range pb.FieldViolations { + br.FieldViolations = append(br.FieldViolations, BadRequestFieldViolation(v)) + } + return br + case resourceInfoType: + var pb resourceInfoPb + 
if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + return ResourceInfo(pb) + case helpType: + var pb helpPb + if err := json.Unmarshal(d, &pb); err != nil { + return m // not a valid known type + } + h := Help{} + for _, l := range pb.Links { + h.Links = append(h.Links, HelpLink(l)) + } + return h + default: + return m // unknown type + } +} diff --git a/apierr/errors.go b/apierr/errors.go index f9888783f..2ea8e2a39 100644 --- a/apierr/errors.go +++ b/apierr/errors.go @@ -16,10 +16,7 @@ import ( "github.com/databricks/databricks-sdk-go/logger/httplog" ) -const ( - errorInfoType string = "type.googleapis.com/google.rpc.ErrorInfo" -) - +// Deprecated: Use [ErrorDetails] instead. type ErrorDetail struct { Type string `json:"@type,omitempty"` Reason string `json:"reason,omitempty"` @@ -27,15 +24,28 @@ type ErrorDetail struct { Metadata map[string]string `json:"metadata,omitempty"` } -// APIError is a generic struct for an api error on databricks +// APIError represents a standard Databricks API error. type APIError struct { ErrorCode string Message string StatusCode int - Details []ErrorDetail + + errorDetails ErrorDetails + // If non-nil, the underlying error that should be returned by calling // errors.Unwrap on this error. unwrap error + + // Details is the sublist of error details that can be unmarshalled into + // the [ErrorDetail] type. + // + // Deprecated: Use [APIError.ErrorDetails] instead. + Details []ErrorDetail +} + +// ErrorDetails returns the error details of the APIError. +func (apiErr *APIError) ErrorDetails() ErrorDetails { + return apiErr.errorDetails } // Error returns the error message string. @@ -48,36 +58,36 @@ func IsMissing(err error) bool { return errors.Is(err, ErrNotFound) } -// GetErrorInfo returns all entries in the list of error details of type `ErrorInfo`. +// GetErrorInfo returns all entries in the list of error details of type +// `ErrorInfo`. 
+// +// Deprecated: Use [APIError.ErrorDetails] instead. func GetErrorInfo(err error) []ErrorDetail { - return getDetailsByType(err, errorInfoType) -} - -func getDetailsByType(err error, errorDetailType string) []ErrorDetail { var apiError *APIError if !errors.As(err, &apiError) { return nil } + filteredDetails := []ErrorDetail{} for _, detail := range apiError.Details { - if errorDetailType == detail.Type { + if errorInfoType == detail.Type { filteredDetails = append(filteredDetails, detail) } } return filteredDetails } -// IsMissing tells if it is missing resource +// IsMissing tells if it is missing resource. func (apiError *APIError) IsMissing() bool { return errors.Is(apiError, ErrNotFound) } -// IsTooManyRequests shows rate exceeded limits +// IsTooManyRequests shows rate exceeded limits. func (apiError *APIError) IsTooManyRequests() bool { return errors.Is(apiError, ErrTooManyRequests) } -// isRetriable returns true if error is retriable +// IsRetriable returns true if error is retriable. func (apiError *APIError) IsRetriable(ctx context.Context) bool { if apiError.IsTooManyRequests() { return true @@ -99,7 +109,7 @@ func (apiError *APIError) IsRetriable(ctx context.Context) bool { return false } -// NotFound returns properly formatted Not Found error +// NotFound returns properly formatted Not Found error. func NotFound(message string) *APIError { return &APIError{ ErrorCode: "NOT_FOUND", @@ -132,7 +142,8 @@ func GenericIOError(ue *url.Error) *APIError { } } -// GetAPIError inspects HTTP errors from the Databricks API for known transient errors. +// GetAPIError inspects HTTP errors from the Databricks API for known transient +// errors. 
func GetAPIError(ctx context.Context, resp common.ResponseWrapper) error { if resp.Response.StatusCode == 429 { return TooManyRequests() @@ -194,10 +205,10 @@ func parseErrorFromResponse(ctx context.Context, resp *http.Response, requestBod func standardErrorParser(ctx context.Context, resp *http.Response, responseBody []byte) *APIError { // Anonymous struct used to unmarshal JSON Databricks API error responses. var errorBody struct { - ErrorCode any `json:"error_code,omitempty"` // int or string - Message string `json:"message,omitempty"` - Details []ErrorDetail `json:"details,omitempty"` - API12Error string `json:"error,omitempty"` + ErrorCode any `json:"error_code,omitempty"` // int or string + Message string `json:"message,omitempty"` + API12Error string `json:"error,omitempty"` + RawDetails []json.RawMessage `json:"details,omitempty"` // The following fields are for scim api only. See RFC7644 section 3.7.3 // https://tools.ietf.org/html/rfc7644#section-3.7.3 @@ -228,12 +239,27 @@ func standardErrorParser(ctx context.Context, resp *http.Response, responseBody errorBody.ErrorCode = fmt.Sprintf("SCIM_%s", errorBody.ScimStatus) } - return &APIError{ + apierr := &APIError{ Message: errorBody.Message, ErrorCode: fmt.Sprintf("%v", errorBody.ErrorCode), StatusCode: resp.StatusCode, - Details: errorBody.Details, } + + // Parse the error details, dropping any that fail to unmarshal. + details := []any{} + for _, rd := range errorBody.RawDetails { + details = append(details, unmarshalDetails(rd)) + + // Deprecated: unmarshal ErrorDetail type for backwards compatibility + // with the previous behavior. 
+ ed := ErrorDetail{} + if json.Unmarshal(rd, &ed) == nil { // ignore errors + apierr.Details = append(apierr.Details, ed) + } + } + apierr.errorDetails = parseErrorDetails(details) + + return apierr } var stringErrorRegex = regexp.MustCompile(`^([A-Z_]+): (.*)$`) diff --git a/apierr/errors_test.go b/apierr/errors_test.go index f850bdde6..61fa8d4fb 100644 --- a/apierr/errors_test.go +++ b/apierr/errors_test.go @@ -3,22 +3,27 @@ package apierr import ( "bytes" "context" + "errors" "fmt" "io" "net/http" "net/url" "testing" + "time" "github.com/databricks/databricks-sdk-go/common" - "github.com/stretchr/testify/assert" + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" ) -func TestAPIError_transientRegexMatches(t *testing.T) { +func TestAPIError_IsRetriable_transientRegexMatches(t *testing.T) { err := APIError{ Message: "worker env WorkerEnvId(workerenv-XXXXX) not found", } - assert.True(t, err.IsRetriable(context.Background())) + if !err.IsRetriable(context.Background()) { + t.Errorf("expected error to be retriable") + } } func makeTestReponseWrapper(statusCode int, resp string) common.ResponseWrapper { @@ -38,63 +43,72 @@ func makeTestReponseWrapper(statusCode int, resp string) common.ResponseWrapper } } -func TestAPIError_GetAPIError(t *testing.T) { +func TestGetAPIError_ErrorDetails(t *testing.T) { + want := ErrorDetails{ErrorInfo: &ErrorInfo{ + Reason: "reason", + Domain: "domain", + }} + APIError := &APIError{errorDetails: want} + + got := APIError.ErrorDetails() + + if diff := cmp.Diff(want, got); diff != "" { + t.Errorf("unexpected error details (-want +got):\n%s", diff) + } +} + +func TestGetAPIError(t *testing.T) { testCases := []struct { - name string - resp common.ResponseWrapper - wantErrorIs error - wantErrorCode string - wantMessage string - wantStatusCode int - wantDetails []ErrorDetail + name string + resp common.ResponseWrapper + want *APIError + wantErrorIs error }{ { - name: "empty response", - resp: 
makeTestReponseWrapper(http.StatusNotFound, ""), - wantErrorIs: ErrNotFound, - wantErrorCode: "", - wantMessage: "Not Found", - wantStatusCode: http.StatusNotFound, - }, - { - name: "happy path", - resp: makeTestReponseWrapper(http.StatusNotFound, `{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "Cluster abc does not exist"}`), - wantErrorIs: ErrResourceDoesNotExist, - wantErrorCode: "RESOURCE_DOES_NOT_EXIST", - wantMessage: "Cluster abc does not exist", - wantStatusCode: http.StatusNotFound, + name: "empty response", + resp: makeTestReponseWrapper(http.StatusNotFound, ""), + want: &APIError{ + ErrorCode: "", + Message: "Not Found", + StatusCode: http.StatusNotFound, + }, + wantErrorIs: ErrNotFound, }, { - name: "error details", - resp: makeTestReponseWrapper(http.StatusNotFound, `{"error_code": "RESOURCE_DOES_NOT_EXIST", "message": "Cluster abc does not exist", "details": [{"@type": "type", "reason": "reason", "domain": "domain", "metadata": {"key": "value"}}]}`), - wantErrorIs: ErrResourceDoesNotExist, - wantErrorCode: "RESOURCE_DOES_NOT_EXIST", - wantMessage: "Cluster abc does not exist", - wantStatusCode: http.StatusNotFound, - wantDetails: []ErrorDetail{ - { - Type: "type", - Reason: "reason", - Domain: "domain", - Metadata: map[string]string{"key": "value"}, - }, + name: "happy path", + resp: makeTestReponseWrapper(http.StatusNotFound, `{ + "error_code": "RESOURCE_DOES_NOT_EXIST", + "message": "Cluster abc does not exist" + }`), + want: &APIError{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "Cluster abc does not exist", + StatusCode: http.StatusNotFound, }, + wantErrorIs: ErrResourceDoesNotExist, }, { - name: "string error response", - resp: makeTestReponseWrapper(http.StatusBadRequest, `MALFORMED_REQUEST: vpc_endpoints malformed parameters: VPC Endpoint ... with use_case ... cannot be attached in ... list`), - wantErrorIs: ErrBadRequest, - wantErrorCode: "MALFORMED_REQUEST", - wantMessage: "vpc_endpoints malformed parameters: VPC Endpoint ... 
with use_case ... cannot be attached in ... list", - wantStatusCode: http.StatusBadRequest, + name: "string error response", + resp: makeTestReponseWrapper(http.StatusBadRequest, `MALFORMED_REQUEST: vpc_endpoints malformed parameters: VPC Endpoint ... with use_case ... cannot be attached in ... list`), + want: &APIError{ + ErrorCode: "MALFORMED_REQUEST", + Message: "vpc_endpoints malformed parameters: VPC Endpoint ... with use_case ... cannot be attached in ... list", + StatusCode: http.StatusBadRequest, + }, + wantErrorIs: ErrBadRequest, }, { - name: "numeric error code", - resp: makeTestReponseWrapper(http.StatusBadRequest, `{"error_code": 500, "message": "Cluster abc does not exist"}`), - wantErrorIs: ErrBadRequest, - wantErrorCode: "500", - wantMessage: "Cluster abc does not exist", - wantStatusCode: http.StatusBadRequest, + name: "numeric error code", + resp: makeTestReponseWrapper(http.StatusBadRequest, `{ + "error_code": 500, + "message": "Cluster abc does not exist"} + `), + want: &APIError{ + ErrorCode: "500", + Message: "Cluster abc does not exist", + StatusCode: http.StatusBadRequest, + }, + wantErrorIs: ErrBadRequest, }, { name: "private link redirect", @@ -109,10 +123,12 @@ func TestAPIError_GetAPIError(t *testing.T) { }, }, }, - wantErrorIs: ErrPermissionDenied, - wantErrorCode: "PRIVATE_LINK_VALIDATION_ERROR", - wantMessage: "The requested workspace has Azure Private Link enabled and is not accessible from the current network. Ensure that Azure Private Link is properly configured and that your device has access to the Azure Private Link endpoint. For more information, see https://learn.microsoft.com/en-us/azure/databricks/security/network/classic/private-link-standard#authentication-troubleshooting.", - wantStatusCode: http.StatusForbidden, + want: &APIError{ + ErrorCode: "PRIVATE_LINK_VALIDATION_ERROR", + Message: "The requested workspace has Azure Private Link enabled and is not accessible from the current network. 
Ensure that Azure Private Link is properly configured and that your device has access to the Azure Private Link endpoint. For more information, see https://learn.microsoft.com/en-us/azure/databricks/security/network/classic/private-link-standard#authentication-troubleshooting.", + StatusCode: http.StatusForbidden, + }, + wantErrorIs: ErrPermissionDenied, }, { name: "applies overrides", @@ -127,31 +143,351 @@ func TestAPIError_GetAPIError(t *testing.T) { }, }, DebugBytes: []byte{}, - ReadCloser: io.NopCloser(bytes.NewReader([]byte(`{"error_code": "INVALID_PARAMETER_VALUE", "message": "Cluster abc does not exist"}`))), + ReadCloser: io.NopCloser(bytes.NewReader([]byte(`{ + "error_code": "INVALID_PARAMETER_VALUE", + "message": "Cluster abc does not exist"} + `))), + }, + want: &APIError{ + ErrorCode: "INVALID_PARAMETER_VALUE", + Message: "Cluster abc does not exist", + StatusCode: http.StatusBadRequest, + }, + wantErrorIs: ErrResourceDoesNotExist, + }, + { + name: "unexpected error", + resp: makeTestReponseWrapper(http.StatusInternalServerError, `unparsable error message`), + want: &APIError{ + ErrorCode: "INTERNAL_SERVER_ERROR", + Message: "unable to parse response. This is likely a bug in the Databricks SDK for Go or the underlying REST API. Please report this issue with the following debugging information to the SDK issue tracker at https://github.com/databricks/databricks-sdk-go/issues. 
Request log:\n```\nGET /api/2.0/myservice\n> * Host: \n< 500 Internal Server Error\n< unparsable error message\n```", + StatusCode: http.StatusInternalServerError, + }, + }, + { + name: "all error details type", + resp: makeTestReponseWrapper(http.StatusNotFound, `{ + "error_code": "RESOURCE_DOES_NOT_EXIST", + "message": "Cluster abc does not exist", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.ErrorInfo", + "reason": "reason", + "domain": "domain", + "metadata": {"k1": "v1", "k2": "v2"} + }, + { + "@type": "type.googleapis.com/google.rpc.RequestInfo", + "request_id": "req42", + "serving_data": "data" + }, + { + "@type": "type.googleapis.com/google.rpc.RetryInfo", + "retry_delay": {"seconds": 1, "nanos": 1} + }, + { + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "stack_entries": ["entry1", "entry2"], + "detail": "detail" + }, + { + "@type": "type.googleapis.com/google.rpc.QuotaFailure", + "violations": [{"subject": "subject", "description": "description"}] + }, + { + "@type": "type.googleapis.com/google.rpc.PreconditionFailure", + "violations": [{"type": "type", "subject": "subject", "description": "description"}] + }, + { + "@type": "type.googleapis.com/google.rpc.BadRequest", + "field_violations": [{"field": "field", "description": "description"}] + }, + { + "@type": "type.googleapis.com/google.rpc.ResourceInfo", + "resource_type": "resource_type", + "resource_name": "resource_name", + "owner": "owner", + "description": "description" + }, + { + "@type": "type.googleapis.com/google.rpc.Help", + "links": [{"description": "description", "url": "url"}] + } + ] + }`), + want: &APIError{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "Cluster abc does not exist", + StatusCode: http.StatusNotFound, + Details: []ErrorDetail{ + { + Type: "type.googleapis.com/google.rpc.ErrorInfo", + Reason: "reason", + Domain: "domain", + Metadata: map[string]string{ + "k1": "v1", + "k2": "v2", + }, + }, + { + Type: 
"type.googleapis.com/google.rpc.RequestInfo", + }, + { + Type: "type.googleapis.com/google.rpc.RetryInfo", + }, + { + Type: "type.googleapis.com/google.rpc.DebugInfo", + }, + { + Type: "type.googleapis.com/google.rpc.QuotaFailure", + }, + { + Type: "type.googleapis.com/google.rpc.PreconditionFailure", + }, + { + Type: "type.googleapis.com/google.rpc.BadRequest", + }, + { + Type: "type.googleapis.com/google.rpc.ResourceInfo", + }, + { + Type: "type.googleapis.com/google.rpc.Help", + }, + }, + errorDetails: ErrorDetails{ + ErrorInfo: &ErrorInfo{ + Reason: "reason", + Domain: "domain", + Metadata: map[string]string{"k1": "v1", "k2": "v2"}, + }, + RequestInfo: &RequestInfo{ + RequestID: "req42", + ServingData: "data", + }, + RetryInfo: &RetryInfo{ + RetryDelay: time.Second + time.Nanosecond, + }, + DebugInfo: &DebugInfo{ + StackEntries: []string{"entry1", "entry2"}, + Detail: "detail", + }, + QuotaFailure: &QuotaFailure{ + Violations: []QuotaFailureViolation{{Subject: "subject", Description: "description"}}, + }, + PreconditionFailure: &PreconditionFailure{ + Violations: []PreconditionFailureViolation{{Type: "type", Subject: "subject", Description: "description"}}, + }, + BadRequest: &BadRequest{ + FieldViolations: []BadRequestFieldViolation{{Field: "field", Description: "description"}}, + }, + ResourceInfo: &ResourceInfo{ + ResourceType: "resource_type", + ResourceName: "resource_name", + Owner: "owner", + Description: "description", + }, + Help: &Help{ + Links: []HelpLink{{Description: "description", URL: "url"}}, + }, + }, + }, + }, + { + name: "unknown error details type", + resp: makeTestReponseWrapper(http.StatusNotFound, `{ + "error_code": "RESOURCE_DOES_NOT_EXIST", + "message": "Cluster abc does not exist", + "details": [ + { + "@type": "foo", + "reason": "reason" + } + ] + }`), + want: &APIError{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "Cluster abc does not exist", + StatusCode: http.StatusNotFound, + Details: []ErrorDetail{ + { + Type: "foo", + 
Reason: "reason", + }, + }, + errorDetails: ErrorDetails{ + UnknownDetails: []any{ + map[string]interface{}{ + "@type": "foo", + "reason": "reason", + }, + }, + }, + }, + }, + { + name: "invalid error details", + resp: makeTestReponseWrapper(http.StatusNotFound, `{ + "error_code": "RESOURCE_DOES_NOT_EXIST", + "message": "Cluster abc does not exist", + "details": [ + 42, + "foobar", + { + "foo": "bar" + }, + { + "@type": "type.googleapis.com/google.rpc.ErrorInfo", + "reason": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.RequestInfo", + "request_id": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.RetryInfo", + "retry_delay": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "stack_entries": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.QuotaFailure", + "violations": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.PreconditionFailure", + "violations": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.BadRequest", + "field_violations": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.ResourceInfo", + "resource_type": 0 + }, + { + "@type": "type.googleapis.com/google.rpc.Help", + "links": 0 + } + ] + }`), + want: &APIError{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "Cluster abc does not exist", + StatusCode: http.StatusNotFound, + Details: []ErrorDetail{ + {}, + // No ErrorInfo because it fails to unmarshal. 
+ {Type: "type.googleapis.com/google.rpc.RequestInfo"}, + {Type: "type.googleapis.com/google.rpc.RetryInfo"}, + {Type: "type.googleapis.com/google.rpc.DebugInfo"}, + {Type: "type.googleapis.com/google.rpc.QuotaFailure"}, + {Type: "type.googleapis.com/google.rpc.PreconditionFailure"}, + {Type: "type.googleapis.com/google.rpc.BadRequest"}, + {Type: "type.googleapis.com/google.rpc.ResourceInfo"}, + {Type: "type.googleapis.com/google.rpc.Help"}, + }, + errorDetails: ErrorDetails{ + UnknownDetails: []any{ + 42.0, + "foobar", + map[string]interface{}{ + "foo": "bar", + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.ErrorInfo", + "reason": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.RequestInfo", + "request_id": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.RetryInfo", + "retry_delay": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.DebugInfo", + "stack_entries": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.QuotaFailure", + "violations": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.PreconditionFailure", + "violations": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.BadRequest", + "field_violations": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.ResourceInfo", + "resource_type": 0.0, + }, + map[string]interface{}{ + "@type": "type.googleapis.com/google.rpc.Help", + "links": 0.0, + }, + }, + }, }, - wantErrorIs: ErrResourceDoesNotExist, - wantErrorCode: "INVALID_PARAMETER_VALUE", - wantMessage: "Cluster abc does not exist", - wantStatusCode: http.StatusBadRequest, }, { - name: "unexpected error", - resp: makeTestReponseWrapper(http.StatusInternalServerError, `unparsable error message`), - wantErrorCode: "INTERNAL_SERVER_ERROR", - wantMessage: "unable to parse response. 
This is likely a bug in the Databricks SDK for Go or the underlying REST API. Please report this issue with the following debugging information to the SDK issue tracker at https://github.com/databricks/databricks-sdk-go/issues. Request log:\n```\nGET /api/2.0/myservice\n> * Host: \n< 500 Internal Server Error\n< unparsable error message\n```", - wantStatusCode: http.StatusInternalServerError, + name: "only keep the last error details of a type", + resp: makeTestReponseWrapper(http.StatusNotFound, `{ + "error_code": "RESOURCE_DOES_NOT_EXIST", + "message": "Cluster abc does not exist", + "details": [ + { + "@type": "type.googleapis.com/google.rpc.ErrorInfo", + "reason": "first" + }, + { + "@type": "type.googleapis.com/google.rpc.ErrorInfo", + "reason": "second" + } + ] + }`), + want: &APIError{ + ErrorCode: "RESOURCE_DOES_NOT_EXIST", + Message: "Cluster abc does not exist", + StatusCode: http.StatusNotFound, + Details: []ErrorDetail{ + { + Type: "type.googleapis.com/google.rpc.ErrorInfo", + Reason: "first", + }, + { + Type: "type.googleapis.com/google.rpc.ErrorInfo", + Reason: "second", + }, + }, + errorDetails: ErrorDetails{ + ErrorInfo: &ErrorInfo{ + Reason: "second", + }, + }, + }, }, } for _, tc := range testCases { t.Run(tc.name, func(t *testing.T) { - got := GetAPIError(context.Background(), tc.resp).(*APIError) - assert.Equal(t, tc.wantErrorCode, got.ErrorCode) - assert.Equal(t, tc.wantMessage, got.Message) - assert.Equal(t, tc.wantStatusCode, got.StatusCode) - assert.Equal(t, tc.wantDetails, got.Details) - if tc.wantErrorIs != nil { - assert.ErrorIs(t, got, tc.wantErrorIs) + got := GetAPIError(context.Background(), tc.resp) + + opts := cmp.Options{ + cmp.AllowUnexported(APIError{}), // to check ErrorDetails + cmpopts.IgnoreFields(APIError{}, "unwrap"), // tested via wantErrorIs + } + if diff := cmp.Diff(tc.want, got, opts); diff != "" { + t.Errorf("unexpected error (-want +got):\n%s", diff) + } + if tc.wantErrorIs != nil && !errors.Is(got, tc.wantErrorIs) { + 
t.Errorf("errors.Is(%q, %q) failed", got, tc.wantErrorIs) } }) } From cdb28002afacb8b762348534a4c4040a9f19c24b Mon Sep 17 00:00:00 2001 From: vuong-nguyen <44292934+nkvuong@users.noreply.github.com> Date: Fri, 28 Feb 2025 21:04:07 +0800 Subject: [PATCH 21/54] [Internal] add `poll.SimpleError` to mock waiter objects returning errors (#1155) ## What changes are proposed in this pull request? in #769, `poll.Simple` method was added to simplify mocking of waiter objects. However, this only allows testing of happy path where no error is returned. This PR adds the equivalent `poll.SimpleError` method to returns an error instead. ## How is this tested? - [x] Unit test added --- NEXT_CHANGELOG.md | 1 + qa/poll/poll.go | 6 ++++++ qa/poll/poll_test.go | 11 +++++++++++ 3 files changed, 18 insertions(+) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 132bddd00..599681aaf 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -12,6 +12,7 @@ ### Internal Changes +* Add `poll.SimpleError` to mock waiter objects returning errors ([#1155](https://github.com/databricks/databricks-sdk-go/pull/1155)) * Refactor `APIError` to expose different types of error details ([#1153](https://github.com/databricks/databricks-sdk-go/pull/1153)). * Update Jobs ListJobs API to support paginated responses ([#1150](https://github.com/databricks/databricks-sdk-go/pull/1150)) * Introduce automated tagging ([#1148](https://github.com/databricks/databricks-sdk-go/pull/1148)). 
diff --git a/qa/poll/poll.go b/qa/poll/poll.go index 740ad3581..9399c3072 100644 --- a/qa/poll/poll.go +++ b/qa/poll/poll.go @@ -9,3 +9,9 @@ func Simple[R any](r R) PollFunc[R] { return &r, nil } } + +func SimpleError[R any](err error) PollFunc[R] { + return func(_ time.Duration, _ func(*R)) (*R, error) { + return nil, err + } +} diff --git a/qa/poll/poll_test.go b/qa/poll/poll_test.go index 2fc9e82cd..e5de3ddc3 100644 --- a/qa/poll/poll_test.go +++ b/qa/poll/poll_test.go @@ -1,6 +1,7 @@ package poll_test import ( + "errors" "testing" "github.com/databricks/databricks-sdk-go/qa/poll" @@ -18,3 +19,13 @@ func TestSimple(t *testing.T) { assert.NoError(t, err) assert.Equal(t, "test", res.Id) } + +func TestSimpleError(t *testing.T) { + waiter := sql.WaitGetWarehouseRunning[int]{ + Poll: poll.SimpleError[sql.GetWarehouseResponse](errors.New("test")), + } + _, err := waiter.Get() + if assert.Error(t, err) { + assert.Equal(t, errors.New("test"), err) + } +} From 7bf83725d18b1312405175407d58bcb2667a2819 Mon Sep 17 00:00:00 2001 From: Giorgi Kikolashvili <47174341+gkiko10@users.noreply.github.com> Date: Mon, 3 Mar 2025 14:07:06 +0100 Subject: [PATCH 22/54] [Internal] Update Jobs `list_runs` function to support paginated responses (#1151) ## What changes are proposed in this pull request? Introduces logic in the extension for jobs ListRuns call. The extended logic accounts for the new response format of API 2.2. API 2.1 format returns all tasks and job_cluster for each run in the runs list. API 2.2 format truncates tasks and job_cluster to 100 elements. The extended ListRuns logic calls GetRun for each run in the list to populate the full list of tasks and job_clusters. I added logic that reads runs from the list and produces custom iterator struct `expandedRunsIterator` that is supposed to mimic python generators. The goal is to only read necessary elements from the API endpoint and not more. ## How is this tested? Unit tests and manual tests. 
Manual tests were done in two modes: using API 2.2 and using API 2.1. So this code is compatible with both API versions. --------- Co-authored-by: Renaud Hartert --- NEXT_CHANGELOG.md | 1 + service/jobs/ext_api.go | 51 ++++++ service/jobs/ext_api_test.go | 297 +++++++++++++++++++++++++++++++++++ 3 files changed, 349 insertions(+) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 599681aaf..4675e0bff 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -12,6 +12,7 @@ ### Internal Changes +* Update Jobs ListRuns API to support paginated responses ([#1151](https://github.com/databricks/databricks-sdk-go/pull/1151)) * Add `poll.SimpleError` to mock waiter objects returning errors ([#1155](https://github.com/databricks/databricks-sdk-go/pull/1155)) * Refactor `APIError` to expose different types of error details ([#1153](https://github.com/databricks/databricks-sdk-go/pull/1153)). * Update Jobs ListJobs API to support paginated responses ([#1150](https://github.com/databricks/databricks-sdk-go/pull/1150)) diff --git a/service/jobs/ext_api.go b/service/jobs/ext_api.go index eb96b6ca0..b4761565f 100644 --- a/service/jobs/ext_api.go +++ b/service/jobs/ext_api.go @@ -58,6 +58,57 @@ func (e *expandedJobsIterator) Next(ctx context.Context) (BaseJob, error) { return job, nil } +// ListRuns fetches a list of job runs. +// If expand_tasks is true, the response will include the full list of tasks and job_clusters for each run. +// This function handles pagination two ways: paginates all the runs in the list and paginates all the tasks and job_clusters for each run. 
+func (a *JobsAPI) ListRuns(ctx context.Context, request ListRunsRequest) listing.Iterator[BaseRun] { + runsList := a.jobsImpl.ListRuns(ctx, request) + + if !request.ExpandTasks { + return runsList + } + + return &expandedRunsIterator{ + originalIterator: runsList, + service: a, + } +} + +// expandedRunsIterator is a custom iterator that for each run calls runs/get in order to fetch full list of tasks and job_clusters. +type expandedRunsIterator struct { + originalIterator listing.Iterator[BaseRun] + service *JobsAPI +} + +func (e *expandedRunsIterator) HasNext(ctx context.Context) bool { + return e.originalIterator.HasNext(ctx) +} + +func (e *expandedRunsIterator) Next(ctx context.Context) (BaseRun, error) { + run, err := e.originalIterator.Next(ctx) + if err != nil { + return BaseRun{}, err + } + if !run.HasMore { + return run, nil + } + + // Fully fetch all top level arrays for the job run. + getRunRequest := GetRunRequest{RunId: run.RunId} + fullRun, err := e.service.GetRun(ctx, getRunRequest) + if err != nil { + return BaseRun{}, err + } + + run.Tasks = fullRun.Tasks + run.JobClusters = fullRun.JobClusters + run.JobParameters = fullRun.JobParameters + run.RepairHistory = fullRun.RepairHistory + run.HasMore = false + + return run, nil +} + // GetRun retrieves a run based on the provided request. // It handles pagination if the run contains multiple iterations or tasks. 
func (a *JobsAPI) GetRun(ctx context.Context, request GetRunRequest) (*Run, error) { diff --git a/service/jobs/ext_api_test.go b/service/jobs/ext_api_test.go index a7af26668..8cb8a1c13 100644 --- a/service/jobs/ext_api_test.go +++ b/service/jobs/ext_api_test.go @@ -1281,3 +1281,300 @@ func TestListJobs(t *testing.T) { assert.EqualValues(t, "job_400", allJobs[3].Settings.Name) }) } + +func TestListRuns(t *testing.T) { + t.Run("runs list with no task expansion", func(t *testing.T) { + ctx := context.Background() + + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/list?", + Response: ListRunsResponse{ + Runs: []BaseRun{ + { + RunId: 100, + RunName: "run100", + }, + { + RunId: 200, + RunName: "run200", + JobParameters: []JobParameter{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + }, + { + RunId: 300, + RunName: "run300", + }, + }, + NextPageToken: "tokenToSecondPage", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/list?page_token=tokenToSecondPage", + Response: ListRunsResponse{ + Runs: []BaseRun{ + { + RunId: 400, + RunName: "run400", + RepairHistory: []RepairHistoryItem{ + { + Id: 410, + }, + { + Id: 411, + }, + }, + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/runs/list?", + Response: ListRunsResponse{ + Runs: []BaseRun{ + { + RunId: 100, + RunName: "run100", + }, + { + RunId: 200, + RunName: "run200", + JobParameters: []JobParameter{ + { + Name: "param1", + Default: "default1", + }, + { + Name: "param2", + Default: "default2", + }, + }, + }, + { + RunId: 300, + RunName: "run300", + }, + }, + NextPageToken: "tokenToSecondPage", + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/runs/list?page_token=tokenToSecondPage", + Response: ListRunsResponse{ + Runs: []BaseRun{ + { + RunId: 400, + RunName: "run400", + RepairHistory: []RepairHistoryItem{ + { + Id: 410, + }, + { + Id: 411, + }, + }, + }, + }, + }, + }, 
+ } + + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + runsList := api.ListRuns(ctx, ListRunsRequest{}) + var allRuns []BaseRun + for runsList.HasNext(ctx) { + run, err := runsList.Next(ctx) + assert.NoError(t, err) + assert.NotEmpty(t, run.RunId) + assert.Empty(t, run.HasMore) + allRuns = append(allRuns, run) + } + + assert.EqualValues(t, len(allRuns), 4) + assert.EqualValues(t, allRuns[0].RunId, 100) + assert.EqualValues(t, allRuns[2].RunId, 300) + assert.EqualValues(t, allRuns[3].RunId, 400) + assert.EqualValues(t, allRuns[3].RunName, "run400") + }) + + t.Run("runs list with with task expansion", func(t *testing.T) { + ctx := context.Background() + + var requestMocks qa.HTTPFixtures = []qa.HTTPFixture{ + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/list?expand_tasks=true", + Response: ListRunsResponse{ + Runs: []BaseRun{ + { + RunId: 100, + Tasks: []RunTask{ + {TaskKey: "taskkey101"}, + {TaskKey: "taskkey102"}, + }, + HasMore: true, + }, + { + RunId: 200, + Tasks: []RunTask{ + {TaskKey: "taskkey201"}, + }, + }, + { + RunId: 300, + Tasks: []RunTask{ + {TaskKey: "taskkey301"}, + }, + }, + }, + NextPageToken: "tokenToSecondPage", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/list?expand_tasks=true&page_token=tokenToSecondPage", + Response: ListRunsResponse{ + Runs: []BaseRun{ + { + RunId: 400, + Tasks: []RunTask{ + {TaskKey: "taskkey401"}, + {TaskKey: "taskkey402"}, + }, + HasMore: true, + }, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/get?run_id=100", + Response: Run{ + RunId: 100, + Tasks: []RunTask{ + {TaskKey: "taskkey101"}, + {TaskKey: "taskkey102"}, + }, + NextPageToken: "tokenToSecondPage_100", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/get?page_token=tokenToSecondPage_100&run_id=100", + Response: Run{ + RunId: 100, + Tasks: []RunTask{ + {TaskKey: "taskkey103"}, + }, + }, + }, 
+ { + Method: "GET", + Resource: "/api/2.2/jobs/runs/get?run_id=400", + Response: Run{ + RunId: 400, + Tasks: []RunTask{ + {TaskKey: "taskkey401"}, + {TaskKey: "taskkey403"}, + }, + NextPageToken: "tokenToSecondPage_400", + }, + }, + { + Method: "GET", + Resource: "/api/2.2/jobs/runs/get?page_token=tokenToSecondPage_400&run_id=400", + Response: Run{ + RunId: 400, + Tasks: []RunTask{ + {TaskKey: "taskkey402"}, + {TaskKey: "taskkey404"}, + }, + }, + }, + { + Method: "GET", + Resource: "/api/2.1/jobs/runs/list?expand_tasks=true", + Response: ListRunsResponse{ + Runs: []BaseRun{ + { + RunId: 100, + Tasks: []RunTask{ + {TaskKey: "taskkey101"}, + {TaskKey: "taskkey102"}, + {TaskKey: "taskkey103"}, + }, + }, + { + RunId: 200, + Tasks: []RunTask{ + {TaskKey: "taskkey201"}, + }, + }, + { + RunId: 300, + Tasks: []RunTask{ + {TaskKey: "taskkey301"}, + }, + }, + { + RunId: 400, + Tasks: []RunTask{ + {TaskKey: "taskkey401"}, + {TaskKey: "taskkey403"}, + {TaskKey: "taskkey402"}, + {TaskKey: "taskkey404"}, + }, + }, + }, + }, + }, + } + + client, server := requestMocks.Client(t) + defer server.Close() + + mockJobsImpl := &jobsImpl{ + client: client, + } + api := &JobsAPI{jobsImpl: *mockJobsImpl} + + runsList := api.ListRuns(ctx, ListRunsRequest{ExpandTasks: true}) + var allRuns []BaseRun + for runsList.HasNext(ctx) { + run, err := runsList.Next(ctx) + assert.NoError(t, err) + assert.NotEmpty(t, run.RunId) + assert.Empty(t, run.HasMore) + allRuns = append(allRuns, run) + } + + assert.EqualValues(t, 4, len(allRuns)) + assert.EqualValues(t, 100, allRuns[0].RunId) + assert.EqualValues(t, 300, allRuns[2].RunId) + assert.EqualValues(t, 400, allRuns[3].RunId) + assert.Equal(t, 3, len(allRuns[0].Tasks)) + assert.EqualValues(t, "taskkey401", allRuns[3].Tasks[0].TaskKey) + assert.EqualValues(t, "taskkey403", allRuns[3].Tasks[1].TaskKey) + assert.EqualValues(t, "taskkey402", allRuns[3].Tasks[2].TaskKey) + assert.EqualValues(t, "taskkey404", allRuns[3].Tasks[3].TaskKey) + }) +} From 
62307c9664b082275182c90f9b332a542ec40e58 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Mon, 3 Mar 2025 16:42:16 +0100 Subject: [PATCH 23/54] [Internal] Update OpenAPI spec (#1163) ## What changes are proposed in this pull request? Update OpenAPI spec ## How is this tested? N/A --- .codegen/_openapi_sha | 2 +- NEXT_CHANGELOG.md | 51 ++ .../dashboards/mock_genie_interface.go | 118 ++++ .../service/ml/mock_experiments_interface.go | 14 +- .../sharing/mock_providers_interface.go | 119 ++++ .../service/sharing/mock_shares_interface.go | 59 +- service/apps/model.go | 4 + service/billing/model.go | 13 +- service/catalog/model.go | 28 +- service/cleanrooms/model.go | 5 +- service/compute/model.go | 136 +++- service/dashboards/api.go | 56 +- service/dashboards/impl.go | 10 + service/dashboards/interface.go | 17 +- service/dashboards/model.go | 288 ++++----- service/jobs/model.go | 102 ++- service/ml/api.go | 68 +- service/ml/impl.go | 36 +- service/ml/interface.go | 62 +- service/ml/model.go | 233 +++---- service/oauth2/impl.go | 3 +- service/oauth2/model.go | 20 +- service/serving/api.go | 10 +- service/serving/impl.go | 57 +- service/serving/model.go | 6 + service/sharing/api.go | 36 +- service/sharing/impl.go | 35 +- service/sharing/interface.go | 17 +- service/sharing/model.go | 604 ++++++++++++++++-- service/vectorsearch/model.go | 2 + service/workspace/model.go | 48 +- 31 files changed, 1641 insertions(+), 618 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 562b72fcc..02c4790ad 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -99f644e72261ef5ecf8d74db20f4b7a1e09723cc \ No newline at end of file +e5c870006a536121442cfd2441bdc8a5fb76ae1e \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 4675e0bff..71462987c 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -21,3 +21,54 @@ * Update Jobs GetRun API to support paginated responses 
([#1132](https://github.com/databricks/databricks-sdk-go/pull/1132)). ### API Changes +* Added `GetSpace` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Added `ListProviderShareAssets` method for [w.Providers](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ProvidersAPI) workspace-level service. +* Added `BudgetPolicyId` and `EffectiveBudgetPolicyId` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). +* Added `Policy` field for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). +* Added `DatabricksGcpServiceAccount` field for [catalog.ValidateCredentialRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ValidateCredentialRequest). +* Added `AttachmentId` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment). +* Added `ConversationId` field for [dashboards.GenieConversation](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieConversation). +* Added `MessageId` field for [dashboards.GenieMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieMessage). +* Added `Description`, `Id`, `LastUpdatedTimestamp`, `Query`, `QueryResultMetadata` and `Title` fields for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). +* Added `GenAiComputeTask` field for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). +* Added `GenAiComputeTask` field for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). 
+* Added `GenAiComputeTask` field for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). +* Added `RunName` field for [ml.CreateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateRun). +* Added `RunName` field for [ml.RunInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInfo). +* Added `RunName` field for [ml.UpdateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#UpdateRun). +* Added `Lifetime` field for [oauth2.CreateServicePrincipalSecretRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretRequest). +* Added `ExpireTime` field for [oauth2.CreateServicePrincipalSecretResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretResponse). +* Added `ExpireTime` field for [oauth2.SecretInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#SecretInfo). +* Added `InstanceProfileArn` field for [serving.AmazonBedrockConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AmazonBedrockConfig). +* Added `Add`, `Principal` and `Remove` fields for [sharing.PermissionsChange](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#PermissionsChange). +* Added `ColumnsToRerank` field for [vectorsearch.QueryVectorIndexRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#QueryVectorIndexRequest). +* Added `Oracle` and `Teradata` enum values for [catalog.ConnectionType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ConnectionType). +* Added `FunctionArgumentsInvalidTypeException` and `MessageCancelledWhileExecutingException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). 
+* Added `Waiting` enum value for [jobs.RunLifecycleStateV2State](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunLifecycleStateV2State). +* Added `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* Added `OauthClientCredentials` enum value for [sharing.AuthenticationType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#AuthenticationType). +* Added `Raw` enum value for [workspace.ExportFormat](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/workspace#ExportFormat). +* [Breaking] Changed `GetByName` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service to return [ml.GetExperimentByNameResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#GetExperimentByNameResponse). +* [Breaking] Changed `LogInputs` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service with new required argument order. +* [Breaking] Changed `SharePermissions` and `UpdatePermissions` methods for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service return type to become non-empty. +* [Breaking] Changed `SharePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.GetSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#GetSharePermissionsResponse). 
+* [Breaking] Changed `UpdatePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.UpdateSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissionsResponse). +* Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. +* [Breaking] Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. +* [Breaking] Changed `Partitions` field for [cleanrooms.CleanRoomAssetTableLocalDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetTableLocalDetails) to type [cleanrooms.PartitionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#PartitionList). +* [Breaking] Changed `Query` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment) to type [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). +* Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. +* [Breaking] Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. +* [Breaking] Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. +* Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. 
+* Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. +* [Breaking] Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. +* [Breaking] Changed `ViewType` field for [ml.ListExperimentsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ListExperimentsRequest) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* [Breaking] Changed `RunId` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs) to be required. +* [Breaking] Changed `ViewType` field for [ml.SearchExperiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperiments) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* [Breaking] Changed `RunViewType` field for [ml.SearchRuns](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRuns) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* [Breaking] Removed `CustomTags` and `PolicyName` fields for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). +* [Breaking] Removed `CachedQuerySchema`, `Description`, `Id`, `InstructionId`, `InstructionTitle`, `LastUpdatedTimestamp`, `Query`, `StatementId` and `Title` fields for [dashboards.QueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryAttachment). +* [Breaking] Removed `MaxResults` and `PageToken` fields for [sharing.UpdateSharePermissions](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissions). 
+* [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchExperimentsViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperimentsViewType). +* [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchRunsRunViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRunsRunViewType). diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go index 7a79503f6..fef6abd67 100644 --- a/experimental/mocks/service/dashboards/mock_genie_interface.go +++ b/experimental/mocks/service/dashboards/mock_genie_interface.go @@ -579,6 +579,124 @@ func (_c *MockGenieInterface_GetMessageQueryResultBySpaceIdAndConversationIdAndM return _c } +// GetSpace provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) GetSpace(ctx context.Context, request dashboards.GenieGetSpaceRequest) (*dashboards.GenieSpace, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetSpace") + } + + var r0 *dashboards.GenieSpace + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetSpaceRequest) (*dashboards.GenieSpace, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetSpaceRequest) *dashboards.GenieSpace); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieSpace) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieGetSpaceRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetSpace_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSpace' +type MockGenieInterface_GetSpace_Call struct { + *mock.Call +} + +// GetSpace is a helper method to define mock.On call 
+// - ctx context.Context +// - request dashboards.GenieGetSpaceRequest +func (_e *MockGenieInterface_Expecter) GetSpace(ctx interface{}, request interface{}) *MockGenieInterface_GetSpace_Call { + return &MockGenieInterface_GetSpace_Call{Call: _e.mock.On("GetSpace", ctx, request)} +} + +func (_c *MockGenieInterface_GetSpace_Call) Run(run func(ctx context.Context, request dashboards.GenieGetSpaceRequest)) *MockGenieInterface_GetSpace_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieGetSpaceRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_GetSpace_Call) Return(_a0 *dashboards.GenieSpace, _a1 error) *MockGenieInterface_GetSpace_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetSpace_Call) RunAndReturn(run func(context.Context, dashboards.GenieGetSpaceRequest) (*dashboards.GenieSpace, error)) *MockGenieInterface_GetSpace_Call { + _c.Call.Return(run) + return _c +} + +// GetSpaceBySpaceId provides a mock function with given fields: ctx, spaceId +func (_m *MockGenieInterface) GetSpaceBySpaceId(ctx context.Context, spaceId string) (*dashboards.GenieSpace, error) { + ret := _m.Called(ctx, spaceId) + + if len(ret) == 0 { + panic("no return value specified for GetSpaceBySpaceId") + } + + var r0 *dashboards.GenieSpace + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*dashboards.GenieSpace, error)); ok { + return rf(ctx, spaceId) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *dashboards.GenieSpace); ok { + r0 = rf(ctx, spaceId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieSpace) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, spaceId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetSpaceBySpaceId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetSpaceBySpaceId' +type 
MockGenieInterface_GetSpaceBySpaceId_Call struct { + *mock.Call +} + +// GetSpaceBySpaceId is a helper method to define mock.On call +// - ctx context.Context +// - spaceId string +func (_e *MockGenieInterface_Expecter) GetSpaceBySpaceId(ctx interface{}, spaceId interface{}) *MockGenieInterface_GetSpaceBySpaceId_Call { + return &MockGenieInterface_GetSpaceBySpaceId_Call{Call: _e.mock.On("GetSpaceBySpaceId", ctx, spaceId)} +} + +func (_c *MockGenieInterface_GetSpaceBySpaceId_Call) Run(run func(ctx context.Context, spaceId string)) *MockGenieInterface_GetSpaceBySpaceId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockGenieInterface_GetSpaceBySpaceId_Call) Return(_a0 *dashboards.GenieSpace, _a1 error) *MockGenieInterface_GetSpaceBySpaceId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetSpaceBySpaceId_Call) RunAndReturn(run func(context.Context, string) (*dashboards.GenieSpace, error)) *MockGenieInterface_GetSpaceBySpaceId_Call { + _c.Call.Return(run) + return _c +} + // StartConversation provides a mock function with given fields: ctx, genieStartConversationMessageRequest func (_m *MockGenieInterface) StartConversation(ctx context.Context, genieStartConversationMessageRequest dashboards.GenieStartConversationMessageRequest) (*dashboards.WaitGetMessageGenieCompleted[dashboards.GenieStartConversationResponse], error) { ret := _m.Called(ctx, genieStartConversationMessageRequest) diff --git a/experimental/mocks/service/ml/mock_experiments_interface.go b/experimental/mocks/service/ml/mock_experiments_interface.go index 0743f56c9..e7cc6155a 100644 --- a/experimental/mocks/service/ml/mock_experiments_interface.go +++ b/experimental/mocks/service/ml/mock_experiments_interface.go @@ -343,23 +343,23 @@ func (_c *MockExperimentsInterface_DeleteTag_Call) RunAndReturn(run func(context } // GetByName provides a mock function with given fields: ctx, request 
-func (_m *MockExperimentsInterface) GetByName(ctx context.Context, request ml.GetByNameRequest) (*ml.GetExperimentResponse, error) { +func (_m *MockExperimentsInterface) GetByName(ctx context.Context, request ml.GetByNameRequest) (*ml.GetExperimentByNameResponse, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for GetByName") } - var r0 *ml.GetExperimentResponse + var r0 *ml.GetExperimentByNameResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, ml.GetByNameRequest) (*ml.GetExperimentResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, ml.GetByNameRequest) (*ml.GetExperimentByNameResponse, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, ml.GetByNameRequest) *ml.GetExperimentResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, ml.GetByNameRequest) *ml.GetExperimentByNameResponse); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*ml.GetExperimentResponse) + r0 = ret.Get(0).(*ml.GetExperimentByNameResponse) } } @@ -391,12 +391,12 @@ func (_c *MockExperimentsInterface_GetByName_Call) Run(run func(ctx context.Cont return _c } -func (_c *MockExperimentsInterface_GetByName_Call) Return(_a0 *ml.GetExperimentResponse, _a1 error) *MockExperimentsInterface_GetByName_Call { +func (_c *MockExperimentsInterface_GetByName_Call) Return(_a0 *ml.GetExperimentByNameResponse, _a1 error) *MockExperimentsInterface_GetByName_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockExperimentsInterface_GetByName_Call) RunAndReturn(run func(context.Context, ml.GetByNameRequest) (*ml.GetExperimentResponse, error)) *MockExperimentsInterface_GetByName_Call { +func (_c *MockExperimentsInterface_GetByName_Call) RunAndReturn(run func(context.Context, ml.GetByNameRequest) (*ml.GetExperimentByNameResponse, error)) *MockExperimentsInterface_GetByName_Call { _c.Call.Return(run) return _c } diff --git 
a/experimental/mocks/service/sharing/mock_providers_interface.go b/experimental/mocks/service/sharing/mock_providers_interface.go index b3c75dd3a..4f3cb5aca 100644 --- a/experimental/mocks/service/sharing/mock_providers_interface.go +++ b/experimental/mocks/service/sharing/mock_providers_interface.go @@ -403,6 +403,125 @@ func (_c *MockProvidersInterface_ListAll_Call) RunAndReturn(run func(context.Con return _c } +// ListProviderShareAssets provides a mock function with given fields: ctx, request +func (_m *MockProvidersInterface) ListProviderShareAssets(ctx context.Context, request sharing.ListProviderShareAssetsRequest) (*sharing.ListProviderShareAssetsResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListProviderShareAssets") + } + + var r0 *sharing.ListProviderShareAssetsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sharing.ListProviderShareAssetsRequest) (*sharing.ListProviderShareAssetsResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sharing.ListProviderShareAssetsRequest) *sharing.ListProviderShareAssetsResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sharing.ListProviderShareAssetsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sharing.ListProviderShareAssetsRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockProvidersInterface_ListProviderShareAssets_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListProviderShareAssets' +type MockProvidersInterface_ListProviderShareAssets_Call struct { + *mock.Call +} + +// ListProviderShareAssets is a helper method to define mock.On call +// - ctx context.Context +// - request sharing.ListProviderShareAssetsRequest +func (_e *MockProvidersInterface_Expecter) ListProviderShareAssets(ctx interface{}, request 
interface{}) *MockProvidersInterface_ListProviderShareAssets_Call { + return &MockProvidersInterface_ListProviderShareAssets_Call{Call: _e.mock.On("ListProviderShareAssets", ctx, request)} +} + +func (_c *MockProvidersInterface_ListProviderShareAssets_Call) Run(run func(ctx context.Context, request sharing.ListProviderShareAssetsRequest)) *MockProvidersInterface_ListProviderShareAssets_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sharing.ListProviderShareAssetsRequest)) + }) + return _c +} + +func (_c *MockProvidersInterface_ListProviderShareAssets_Call) Return(_a0 *sharing.ListProviderShareAssetsResponse, _a1 error) *MockProvidersInterface_ListProviderShareAssets_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockProvidersInterface_ListProviderShareAssets_Call) RunAndReturn(run func(context.Context, sharing.ListProviderShareAssetsRequest) (*sharing.ListProviderShareAssetsResponse, error)) *MockProvidersInterface_ListProviderShareAssets_Call { + _c.Call.Return(run) + return _c +} + +// ListProviderShareAssetsByProviderNameAndShareName provides a mock function with given fields: ctx, providerName, shareName +func (_m *MockProvidersInterface) ListProviderShareAssetsByProviderNameAndShareName(ctx context.Context, providerName string, shareName string) (*sharing.ListProviderShareAssetsResponse, error) { + ret := _m.Called(ctx, providerName, shareName) + + if len(ret) == 0 { + panic("no return value specified for ListProviderShareAssetsByProviderNameAndShareName") + } + + var r0 *sharing.ListProviderShareAssetsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string) (*sharing.ListProviderShareAssetsResponse, error)); ok { + return rf(ctx, providerName, shareName) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string) *sharing.ListProviderShareAssetsResponse); ok { + r0 = rf(ctx, providerName, shareName) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*sharing.ListProviderShareAssetsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string) error); ok { + r1 = rf(ctx, providerName, shareName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListProviderShareAssetsByProviderNameAndShareName' +type MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call struct { + *mock.Call +} + +// ListProviderShareAssetsByProviderNameAndShareName is a helper method to define mock.On call +// - ctx context.Context +// - providerName string +// - shareName string +func (_e *MockProvidersInterface_Expecter) ListProviderShareAssetsByProviderNameAndShareName(ctx interface{}, providerName interface{}, shareName interface{}) *MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call { + return &MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call{Call: _e.mock.On("ListProviderShareAssetsByProviderNameAndShareName", ctx, providerName, shareName)} +} + +func (_c *MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call) Run(run func(ctx context.Context, providerName string, shareName string)) *MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string)) + }) + return _c +} + +func (_c *MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call) Return(_a0 *sharing.ListProviderShareAssetsResponse, _a1 error) *MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call) RunAndReturn(run func(context.Context, string, string) 
(*sharing.ListProviderShareAssetsResponse, error)) *MockProvidersInterface_ListProviderShareAssetsByProviderNameAndShareName_Call { + _c.Call.Return(run) + return _c +} + // ListShares provides a mock function with given fields: ctx, request func (_m *MockProvidersInterface) ListShares(ctx context.Context, request sharing.ListSharesRequest) listing.Iterator[sharing.ProviderShare] { ret := _m.Called(ctx, request) diff --git a/experimental/mocks/service/sharing/mock_shares_interface.go b/experimental/mocks/service/sharing/mock_shares_interface.go index 006b7a8ea..873133875 100644 --- a/experimental/mocks/service/sharing/mock_shares_interface.go +++ b/experimental/mocks/service/sharing/mock_shares_interface.go @@ -5,10 +5,7 @@ package sharing import ( context "context" - catalog "github.com/databricks/databricks-sdk-go/service/catalog" - listing "github.com/databricks/databricks-sdk-go/listing" - mock "github.com/stretchr/testify/mock" sharing "github.com/databricks/databricks-sdk-go/service/sharing" @@ -407,23 +404,23 @@ func (_c *MockSharesInterface_ListAll_Call) RunAndReturn(run func(context.Contex } // SharePermissions provides a mock function with given fields: ctx, request -func (_m *MockSharesInterface) SharePermissions(ctx context.Context, request sharing.SharePermissionsRequest) (*catalog.PermissionsList, error) { +func (_m *MockSharesInterface) SharePermissions(ctx context.Context, request sharing.SharePermissionsRequest) (*sharing.GetSharePermissionsResponse, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for SharePermissions") } - var r0 *catalog.PermissionsList + var r0 *sharing.GetSharePermissionsResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, sharing.SharePermissionsRequest) (*catalog.PermissionsList, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, sharing.SharePermissionsRequest) (*sharing.GetSharePermissionsResponse, error)); ok { return rf(ctx, request) } - if rf, ok 
:= ret.Get(0).(func(context.Context, sharing.SharePermissionsRequest) *catalog.PermissionsList); ok { + if rf, ok := ret.Get(0).(func(context.Context, sharing.SharePermissionsRequest) *sharing.GetSharePermissionsResponse); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*catalog.PermissionsList) + r0 = ret.Get(0).(*sharing.GetSharePermissionsResponse) } } @@ -455,34 +452,34 @@ func (_c *MockSharesInterface_SharePermissions_Call) Run(run func(ctx context.Co return _c } -func (_c *MockSharesInterface_SharePermissions_Call) Return(_a0 *catalog.PermissionsList, _a1 error) *MockSharesInterface_SharePermissions_Call { +func (_c *MockSharesInterface_SharePermissions_Call) Return(_a0 *sharing.GetSharePermissionsResponse, _a1 error) *MockSharesInterface_SharePermissions_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockSharesInterface_SharePermissions_Call) RunAndReturn(run func(context.Context, sharing.SharePermissionsRequest) (*catalog.PermissionsList, error)) *MockSharesInterface_SharePermissions_Call { +func (_c *MockSharesInterface_SharePermissions_Call) RunAndReturn(run func(context.Context, sharing.SharePermissionsRequest) (*sharing.GetSharePermissionsResponse, error)) *MockSharesInterface_SharePermissions_Call { _c.Call.Return(run) return _c } // SharePermissionsByName provides a mock function with given fields: ctx, name -func (_m *MockSharesInterface) SharePermissionsByName(ctx context.Context, name string) (*catalog.PermissionsList, error) { +func (_m *MockSharesInterface) SharePermissionsByName(ctx context.Context, name string) (*sharing.GetSharePermissionsResponse, error) { ret := _m.Called(ctx, name) if len(ret) == 0 { panic("no return value specified for SharePermissionsByName") } - var r0 *catalog.PermissionsList + var r0 *sharing.GetSharePermissionsResponse var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (*catalog.PermissionsList, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, 
string) (*sharing.GetSharePermissionsResponse, error)); ok { return rf(ctx, name) } - if rf, ok := ret.Get(0).(func(context.Context, string) *catalog.PermissionsList); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) *sharing.GetSharePermissionsResponse); ok { r0 = rf(ctx, name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*catalog.PermissionsList) + r0 = ret.Get(0).(*sharing.GetSharePermissionsResponse) } } @@ -514,12 +511,12 @@ func (_c *MockSharesInterface_SharePermissionsByName_Call) Run(run func(ctx cont return _c } -func (_c *MockSharesInterface_SharePermissionsByName_Call) Return(_a0 *catalog.PermissionsList, _a1 error) *MockSharesInterface_SharePermissionsByName_Call { +func (_c *MockSharesInterface_SharePermissionsByName_Call) Return(_a0 *sharing.GetSharePermissionsResponse, _a1 error) *MockSharesInterface_SharePermissionsByName_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockSharesInterface_SharePermissionsByName_Call) RunAndReturn(run func(context.Context, string) (*catalog.PermissionsList, error)) *MockSharesInterface_SharePermissionsByName_Call { +func (_c *MockSharesInterface_SharePermissionsByName_Call) RunAndReturn(run func(context.Context, string) (*sharing.GetSharePermissionsResponse, error)) *MockSharesInterface_SharePermissionsByName_Call { _c.Call.Return(run) return _c } @@ -584,21 +581,33 @@ func (_c *MockSharesInterface_Update_Call) RunAndReturn(run func(context.Context } // UpdatePermissions provides a mock function with given fields: ctx, request -func (_m *MockSharesInterface) UpdatePermissions(ctx context.Context, request sharing.UpdateSharePermissions) error { +func (_m *MockSharesInterface) UpdatePermissions(ctx context.Context, request sharing.UpdateSharePermissions) (*sharing.UpdateSharePermissionsResponse, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for UpdatePermissions") } - var r0 error - if rf, ok := ret.Get(0).(func(context.Context, 
sharing.UpdateSharePermissions) error); ok { + var r0 *sharing.UpdateSharePermissionsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sharing.UpdateSharePermissions) (*sharing.UpdateSharePermissionsResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sharing.UpdateSharePermissions) *sharing.UpdateSharePermissionsResponse); ok { r0 = rf(ctx, request) } else { - r0 = ret.Error(0) + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sharing.UpdateSharePermissionsResponse) + } } - return r0 + if rf, ok := ret.Get(1).(func(context.Context, sharing.UpdateSharePermissions) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 } // MockSharesInterface_UpdatePermissions_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdatePermissions' @@ -620,12 +629,12 @@ func (_c *MockSharesInterface_UpdatePermissions_Call) Run(run func(ctx context.C return _c } -func (_c *MockSharesInterface_UpdatePermissions_Call) Return(_a0 error) *MockSharesInterface_UpdatePermissions_Call { - _c.Call.Return(_a0) +func (_c *MockSharesInterface_UpdatePermissions_Call) Return(_a0 *sharing.UpdateSharePermissionsResponse, _a1 error) *MockSharesInterface_UpdatePermissions_Call { + _c.Call.Return(_a0, _a1) return _c } -func (_c *MockSharesInterface_UpdatePermissions_Call) RunAndReturn(run func(context.Context, sharing.UpdateSharePermissions) error) *MockSharesInterface_UpdatePermissions_Call { +func (_c *MockSharesInterface_UpdatePermissions_Call) RunAndReturn(run func(context.Context, sharing.UpdateSharePermissions) (*sharing.UpdateSharePermissionsResponse, error)) *MockSharesInterface_UpdatePermissions_Call { _c.Call.Return(run) return _c } diff --git a/service/apps/model.go b/service/apps/model.go index 4ff7ace55..0a5ba614f 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -15,6 +15,8 @@ type App struct { AppStatus *ApplicationStatus 
`json:"app_status,omitempty"` + BudgetPolicyId string `json:"budget_policy_id,omitempty"` + ComputeStatus *ComputeStatus `json:"compute_status,omitempty"` // The creation time of the app. Formatted timestamp in ISO 6801. CreateTime string `json:"create_time,omitempty"` @@ -26,6 +28,8 @@ type App struct { DefaultSourceCodePath string `json:"default_source_code_path,omitempty"` // The description of the app. Description string `json:"description,omitempty"` + + EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` // The unique identifier of the app. Id string `json:"id,omitempty"` // The name of the app. The name must contain only lowercase alphanumeric diff --git a/service/billing/model.go b/service/billing/model.go index d95320f06..86d5fa32b 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -258,7 +258,7 @@ type BudgetPolicy struct { CustomTags []compute.CustomPolicyTag `json:"custom_tags,omitempty"` // The Id of the policy. This field is generated by Databricks and globally // unique. - PolicyId string `json:"policy_id"` + PolicyId string `json:"policy_id,omitempty"` // The name of the policy. - Must be unique among active policies. - Can // contain only characters from the ISO 8859-1 (latin1) set. PolicyName string `json:"policy_name,omitempty"` @@ -390,13 +390,10 @@ type CreateBudgetConfigurationResponse struct { // A request to create a BudgetPolicy. type CreateBudgetPolicyRequest struct { - // A list of tags defined by the customer. At most 40 entries are allowed - // per policy. - CustomTags []compute.CustomPolicyTag `json:"custom_tags,omitempty"` - // The name of the policy. - Must be unique among active policies. - Can - // contain only characters of 0-9, a-z, A-Z, -, =, ., :, /, @, _, +, - // whitespace. - PolicyName string `json:"policy_name,omitempty"` + // The policy to create. 
`policy_id` needs to be empty as it will be + // generated `policy_name` must be provided, custom_tags may need to be + // provided depending on the cloud provider. All other fields are optional. + Policy *BudgetPolicy `json:"policy,omitempty"` // A unique identifier for this request. Restricted to 36 ASCII characters. // A random UUID is recommended. This request is only idempotent if a // `request_id` is provided. diff --git a/service/catalog/model.go b/service/catalog/model.go index 6673ab8cf..6b90fdb82 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -659,6 +659,8 @@ const ConnectionTypeHttp ConnectionType = `HTTP` const ConnectionTypeMysql ConnectionType = `MYSQL` +const ConnectionTypeOracle ConnectionType = `ORACLE` + const ConnectionTypePostgresql ConnectionType = `POSTGRESQL` const ConnectionTypeRedshift ConnectionType = `REDSHIFT` @@ -669,6 +671,8 @@ const ConnectionTypeSqldw ConnectionType = `SQLDW` const ConnectionTypeSqlserver ConnectionType = `SQLSERVER` +const ConnectionTypeTeradata ConnectionType = `TERADATA` + // String representation for [fmt.Print] func (f *ConnectionType) String() string { return string(*f) @@ -677,11 +681,11 @@ func (f *ConnectionType) String() string { // Set raw string value and validate it against allowed values func (f *ConnectionType) Set(v string) error { switch v { - case `BIGQUERY`, `DATABRICKS`, `GLUE`, `HIVE_METASTORE`, `HTTP`, `MYSQL`, `POSTGRESQL`, `REDSHIFT`, `SNOWFLAKE`, `SQLDW`, `SQLSERVER`: + case `BIGQUERY`, `DATABRICKS`, `GLUE`, `HIVE_METASTORE`, `HTTP`, `MYSQL`, `ORACLE`, `POSTGRESQL`, `REDSHIFT`, `SNOWFLAKE`, `SQLDW`, `SQLSERVER`, `TERADATA`: *f = ConnectionType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "DATABRICKS", "GLUE", "HIVE_METASTORE", "HTTP", "MYSQL", "POSTGRESQL", "REDSHIFT", "SNOWFLAKE", "SQLDW", "SQLSERVER"`, v) + return fmt.Errorf(`value "%s" is not one of "BIGQUERY", "DATABRICKS", "GLUE", "HIVE_METASTORE", "HTTP", "MYSQL", "ORACLE", 
"POSTGRESQL", "REDSHIFT", "SNOWFLAKE", "SQLDW", "SQLSERVER", "TERADATA"`, v) } } @@ -5256,6 +5260,23 @@ func (f *TableType) Type() string { return "TableType" } +type TagKeyValue struct { + // name of the tag + Key string `json:"key,omitempty"` + // value of the tag associated with the key, could be optional + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *TagKeyValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TagKeyValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type TemporaryCredentials struct { // AWS temporary credentials for API authentication. Read more at // https://docs.aws.amazon.com/STS/latest/APIReference/API_Credentials.html. @@ -5820,6 +5841,9 @@ type ValidateCredentialRequest struct { // Required. The name of an existing credential or long-lived cloud // credential to validate. CredentialName string `json:"credential_name,omitempty"` + // GCP long-lived credential. Databricks-created Google Cloud Storage + // service account. + DatabricksGcpServiceAccount *DatabricksGcpServiceAccount `json:"databricks_gcp_service_account,omitempty"` // The name of an existing external location to validate. Only applicable // for storage credentials (purpose is **STORAGE**.) ExternalLocationName string `json:"external_location_name,omitempty"` diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index 4e49f301b..ac92cb087 100755 --- a/service/cleanrooms/model.go +++ b/service/cleanrooms/model.go @@ -247,7 +247,7 @@ type CleanRoomAssetTableLocalDetails struct { // the format of *catalog*.*schema*.*table_name* LocalName string `json:"local_name,omitempty"` // Partition filtering specification for a shared table. 
- Partitions []sharing.PartitionSpecificationPartition `json:"partitions,omitempty"` + Partitions []sharing.Partition `json:"partitions,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -640,7 +640,8 @@ type ListCleanRoomNotebookTaskRunsRequest struct { CleanRoomName string `json:"-" url:"-"` // Notebook name NotebookName string `json:"-" url:"notebook_name,omitempty"` - // The maximum number of task runs to return + // The maximum number of task runs to return. Currently ignored - all runs + // will be returned. PageSize int `json:"-" url:"page_size,omitempty"` // Opaque pagination token to go to next page based on previous query. PageToken string `json:"-" url:"page_token,omitempty"` diff --git a/service/compute/model.go b/service/compute/model.go index 6aa55693f..32881163f 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -439,7 +439,7 @@ type ClusterAttributes struct { // Data security mode decides what data governance model to use when // accessing data from a cluster. // - // The following modes can only be used with `kind`. * + // The following modes can only be used when `kind = CLASSIC_PREVIEW`. * // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate // access mode depending on your compute configuration. * // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * @@ -493,7 +493,7 @@ type ClusterAttributes struct { InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` // The optional ID of the instance pool to which the cluster belongs. InstancePoolId string `json:"instance_pool_id,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // When set to true, Databricks will automatically set single node related // `custom_tags`, `spark_conf`, and `num_workers` @@ -503,8 +503,18 @@ type ClusterAttributes struct { // Depending on `kind`, different validations and default values will be // applied. 
// - // The first usage of this value is for the simple cluster form where it - // sets `kind = CLASSIC_PREVIEW`. + // Clusters with `kind = CLASSIC_PREVIEW` support the following fields, + // whereas clusters with no specified `kind` do not. * + // [is_single_node](/api/workspace/clusters/create#is_single_node) * + // [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) * + // [data_security_mode](/api/workspace/clusters/create#data_security_mode) + // set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or + // `DATA_SECURITY_MODE_STANDARD` + // + // By using the [simple form], your clusters are automatically using `kind = + // CLASSIC_PREVIEW`. + // + // [simple form]: https://docs.databricks.com/compute/simple-form.html Kind Kind `json:"kind,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can @@ -553,7 +563,7 @@ type ClusterAttributes struct { // cluster. The corresponding private keys can be used to login with the // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []string `json:"ssh_public_keys,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // `effective_spark_version` is determined by `spark_version` (DBR release), // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or @@ -654,7 +664,7 @@ type ClusterDetails struct { // Data security mode decides what data governance model to use when // accessing data from a cluster. // - // The following modes can only be used with `kind`. * + // The following modes can only be used when `kind = CLASSIC_PREVIEW`. * // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate // access mode depending on your compute configuration. * // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. 
* @@ -727,7 +737,7 @@ type ClusterDetails struct { InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` // The optional ID of the instance pool to which the cluster belongs. InstancePoolId string `json:"instance_pool_id,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // When set to true, Databricks will automatically set single node related // `custom_tags`, `spark_conf`, and `num_workers` @@ -740,8 +750,18 @@ type ClusterDetails struct { // Depending on `kind`, different validations and default values will be // applied. // - // The first usage of this value is for the simple cluster form where it - // sets `kind = CLASSIC_PREVIEW`. + // Clusters with `kind = CLASSIC_PREVIEW` support the following fields, + // whereas clusters with no specified `kind` do not. * + // [is_single_node](/api/workspace/clusters/create#is_single_node) * + // [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) * + // [data_security_mode](/api/workspace/clusters/create#data_security_mode) + // set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or + // `DATA_SECURITY_MODE_STANDARD` + // + // By using the [simple form], your clusters are automatically using `kind = + // CLASSIC_PREVIEW`. + // + // [simple form]: https://docs.databricks.com/compute/simple-form.html Kind Kind `json:"kind,omitempty"` // the timestamp that the cluster was started/restarted LastRestartedTime int64 `json:"last_restarted_time,omitempty"` @@ -829,7 +849,7 @@ type ClusterDetails struct { // Information about why the cluster was terminated. This field only appears // when the cluster is in a `TERMINATING` or `TERMINATED` state. TerminationReason *TerminationReason `json:"termination_reason,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. 
// // `effective_spark_version` is determined by `spark_version` (DBR release), // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or @@ -1261,7 +1281,7 @@ type ClusterSpec struct { // Data security mode decides what data governance model to use when // accessing data from a cluster. // - // The following modes can only be used with `kind`. * + // The following modes can only be used when `kind = CLASSIC_PREVIEW`. * // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate // access mode depending on your compute configuration. * // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * @@ -1315,7 +1335,7 @@ type ClusterSpec struct { InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` // The optional ID of the instance pool to which the cluster belongs. InstancePoolId string `json:"instance_pool_id,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // When set to true, Databricks will automatically set single node related // `custom_tags`, `spark_conf`, and `num_workers` @@ -1325,8 +1345,18 @@ type ClusterSpec struct { // Depending on `kind`, different validations and default values will be // applied. // - // The first usage of this value is for the simple cluster form where it - // sets `kind = CLASSIC_PREVIEW`. + // Clusters with `kind = CLASSIC_PREVIEW` support the following fields, + // whereas clusters with no specified `kind` do not. * + // [is_single_node](/api/workspace/clusters/create#is_single_node) * + // [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) * + // [data_security_mode](/api/workspace/clusters/create#data_security_mode) + // set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or + // `DATA_SECURITY_MODE_STANDARD` + // + // By using the [simple form], your clusters are automatically using `kind = + // CLASSIC_PREVIEW`. 
+ // + // [simple form]: https://docs.databricks.com/compute/simple-form.html Kind Kind `json:"kind,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can @@ -1386,7 +1416,7 @@ type ClusterSpec struct { // cluster. The corresponding private keys can be used to login with the // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []string `json:"ssh_public_keys,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // `effective_spark_version` is determined by `spark_version` (DBR release), // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or @@ -1594,7 +1624,7 @@ type CreateCluster struct { // Data security mode decides what data governance model to use when // accessing data from a cluster. // - // The following modes can only be used with `kind`. * + // The following modes can only be used when `kind = CLASSIC_PREVIEW`. * // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate // access mode depending on your compute configuration. * // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * @@ -1648,7 +1678,7 @@ type CreateCluster struct { InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` // The optional ID of the instance pool to which the cluster belongs. InstancePoolId string `json:"instance_pool_id,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // When set to true, Databricks will automatically set single node related // `custom_tags`, `spark_conf`, and `num_workers` @@ -1658,8 +1688,18 @@ type CreateCluster struct { // Depending on `kind`, different validations and default values will be // applied. // - // The first usage of this value is for the simple cluster form where it - // sets `kind = CLASSIC_PREVIEW`. 
+ // Clusters with `kind = CLASSIC_PREVIEW` support the following fields, + // whereas clusters with no specified `kind` do not. * + // [is_single_node](/api/workspace/clusters/create#is_single_node) * + // [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) * + // [data_security_mode](/api/workspace/clusters/create#data_security_mode) + // set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or + // `DATA_SECURITY_MODE_STANDARD` + // + // By using the [simple form], your clusters are automatically using `kind = + // CLASSIC_PREVIEW`. + // + // [simple form]: https://docs.databricks.com/compute/simple-form.html Kind Kind `json:"kind,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can @@ -1719,7 +1759,7 @@ type CreateCluster struct { // cluster. The corresponding private keys can be used to login with the // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []string `json:"ssh_public_keys,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // `effective_spark_version` is determined by `spark_version` (DBR release), // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or @@ -2021,7 +2061,7 @@ func (f *DataPlaneEventDetailsEventType) Type() string { // Data security mode decides what data governance model to use when accessing // data from a cluster. // -// The following modes can only be used with `kind`. * +// The following modes can only be used when `kind = CLASSIC_PREVIEW`. * // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate access // mode depending on your compute configuration. * // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. 
* @@ -2368,7 +2408,7 @@ type EditCluster struct { // Data security mode decides what data governance model to use when // accessing data from a cluster. // - // The following modes can only be used with `kind`. * + // The following modes can only be used when `kind = CLASSIC_PREVIEW`. * // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate // access mode depending on your compute configuration. * // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * @@ -2422,7 +2462,7 @@ type EditCluster struct { InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` // The optional ID of the instance pool to which the cluster belongs. InstancePoolId string `json:"instance_pool_id,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // When set to true, Databricks will automatically set single node related // `custom_tags`, `spark_conf`, and `num_workers` @@ -2432,8 +2472,18 @@ type EditCluster struct { // Depending on `kind`, different validations and default values will be // applied. // - // The first usage of this value is for the simple cluster form where it - // sets `kind = CLASSIC_PREVIEW`. + // Clusters with `kind = CLASSIC_PREVIEW` support the following fields, + // whereas clusters with no specified `kind` do not. * + // [is_single_node](/api/workspace/clusters/create#is_single_node) * + // [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) * + // [data_security_mode](/api/workspace/clusters/create#data_security_mode) + // set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or + // `DATA_SECURITY_MODE_STANDARD` + // + // By using the [simple form], your clusters are automatically using `kind = + // CLASSIC_PREVIEW`. 
+ // + // [simple form]: https://docs.databricks.com/compute/simple-form.html Kind Kind `json:"kind,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can @@ -2493,7 +2543,7 @@ type EditCluster struct { // cluster. The corresponding private keys can be used to login with the // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []string `json:"ssh_public_keys,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // `effective_spark_version` is determined by `spark_version` (DBR release), // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or @@ -3949,8 +3999,18 @@ func (s InstanceProfile) MarshalJSON() ([]byte, error) { // Depending on `kind`, different validations and default values will be // applied. // -// The first usage of this value is for the simple cluster form where it sets -// `kind = CLASSIC_PREVIEW`. +// Clusters with `kind = CLASSIC_PREVIEW` support the following fields, whereas +// clusters with no specified `kind` do not. * +// [is_single_node](/api/workspace/clusters/create#is_single_node) * +// [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) * +// [data_security_mode](/api/workspace/clusters/create#data_security_mode) set +// to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or +// `DATA_SECURITY_MODE_STANDARD` +// +// By using the [simple form], your clusters are automatically using `kind = +// CLASSIC_PREVIEW`. +// +// [simple form]: https://docs.databricks.com/compute/simple-form.html type Kind string const KindClassicPreview Kind = `CLASSIC_PREVIEW` @@ -5368,7 +5428,7 @@ type UpdateClusterResource struct { // Data security mode decides what data governance model to use when // accessing data from a cluster. // - // The following modes can only be used with `kind`. 
* + // The following modes can only be used when `kind = CLASSIC_PREVIEW`. * // `DATA_SECURITY_MODE_AUTO`: Databricks will choose the most appropriate // access mode depending on your compute configuration. * // `DATA_SECURITY_MODE_STANDARD`: Alias for `USER_ISOLATION`. * @@ -5422,7 +5482,7 @@ type UpdateClusterResource struct { InitScripts []InitScriptInfo `json:"init_scripts,omitempty"` // The optional ID of the instance pool to which the cluster belongs. InstancePoolId string `json:"instance_pool_id,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // When set to true, Databricks will automatically set single node related // `custom_tags`, `spark_conf`, and `num_workers` @@ -5432,8 +5492,18 @@ type UpdateClusterResource struct { // Depending on `kind`, different validations and default values will be // applied. // - // The first usage of this value is for the simple cluster form where it - // sets `kind = CLASSIC_PREVIEW`. + // Clusters with `kind = CLASSIC_PREVIEW` support the following fields, + // whereas clusters with no specified `kind` do not. * + // [is_single_node](/api/workspace/clusters/create#is_single_node) * + // [use_ml_runtime](/api/workspace/clusters/create#use_ml_runtime) * + // [data_security_mode](/api/workspace/clusters/create#data_security_mode) + // set to `DATA_SECURITY_MODE_AUTO`, `DATA_SECURITY_MODE_DEDICATED`, or + // `DATA_SECURITY_MODE_STANDARD` + // + // By using the [simple form], your clusters are automatically using `kind = + // CLASSIC_PREVIEW`. + // + // [simple form]: https://docs.databricks.com/compute/simple-form.html Kind Kind `json:"kind,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can @@ -5493,7 +5563,7 @@ type UpdateClusterResource struct { // cluster. 
The corresponding private keys can be used to login with the // user name `ubuntu` on port `2200`. Up to 10 keys can be specified. SshPublicKeys []string `json:"ssh_public_keys,omitempty"` - // This field can only be used with `kind`. + // This field can only be used when `kind = CLASSIC_PREVIEW`. // // `effective_spark_version` is determined by `spark_version` (DBR release), // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or diff --git a/service/dashboards/api.go b/service/dashboards/api.go index bba3aa4ba..1f7df53c2 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -22,8 +22,9 @@ type GenieInterface interface { // Create conversation message. // - // Create new message in [conversation](:method:genie/startconversation). The AI - // response uses all previously created messages in the conversation to respond. + // Create new message in a [conversation](:method:genie/startconversation). The + // AI response uses all previously created messages in the conversation to + // respond. CreateMessage(ctx context.Context, genieCreateConversationMessageRequest GenieCreateConversationMessageRequest) (*WaitGetMessageGenieCompleted[GenieMessage], error) // Calls [GenieAPIInterface.CreateMessage] and waits to reach COMPLETED state @@ -49,32 +50,44 @@ type GenieInterface interface { // Get message from conversation. GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieMessage, error) - // Get conversation message SQL query result. + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is // only available if a message has a query attachment and the message status is // `EXECUTING_QUERY`. GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) - // Get conversation message SQL query result. 
+ // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is // only available if a message has a query attachment and the message status is // `EXECUTING_QUERY`. GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieGetMessageQueryResultResponse, error) - // Get conversation message SQL query result by attachment id. + // Get conversation message SQL query result. // - // Get the result of SQL query by attachment id This is only available if a - // message has a query attachment and the message status is `EXECUTING_QUERY`. + // Get the result of SQL query if the message has a query attachment. This is + // only available if a message has a query attachment and the message status is + // `EXECUTING_QUERY` OR `COMPLETED`. GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) - // Get conversation message SQL query result by attachment id. + // Get conversation message SQL query result. // - // Get the result of SQL query by attachment id This is only available if a - // message has a query attachment and the message status is `EXECUTING_QUERY`. + // Get the result of SQL query if the message has a query attachment. This is + // only available if a message has a query attachment and the message status is + // `EXECUTING_QUERY` OR `COMPLETED`. GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) + // Get details of a Genie Space. + // + // Get a Genie Space. + GetSpace(ctx context.Context, request GenieGetSpaceRequest) (*GenieSpace, error) + + // Get details of a Genie Space. + // + // Get a Genie Space. 
+ GetSpaceBySpaceId(ctx context.Context, spaceId string) (*GenieSpace, error) + // Start conversation. // // Start a new conversation. @@ -166,8 +179,9 @@ func (w *WaitGetMessageGenieCompleted[R]) GetWithTimeout(timeout time.Duration) // Create conversation message. // -// Create new message in [conversation](:method:genie/startconversation). The AI -// response uses all previously created messages in the conversation to respond. +// Create new message in a [conversation](:method:genie/startconversation). The +// AI response uses all previously created messages in the conversation to +// respond. func (a *GenieAPI) CreateMessage(ctx context.Context, genieCreateConversationMessageRequest GenieCreateConversationMessageRequest) (*WaitGetMessageGenieCompleted[GenieMessage], error) { genieMessage, err := a.genieImpl.CreateMessage(ctx, genieCreateConversationMessageRequest) if err != nil { @@ -224,7 +238,7 @@ func (a *GenieAPI) GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context. }) } -// Get conversation message SQL query result. +// [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is // only available if a message has a query attachment and the message status is @@ -237,10 +251,11 @@ func (a *GenieAPI) GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(c }) } -// Get conversation message SQL query result by attachment id. +// Get conversation message SQL query result. // -// Get the result of SQL query by attachment id This is only available if a -// message has a query attachment and the message status is `EXECUTING_QUERY`. +// Get the result of SQL query if the message has a query attachment. This is +// only available if a message has a query attachment and the message status is +// `EXECUTING_QUERY` OR `COMPLETED`. 
func (a *GenieAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) { return a.genieImpl.GetMessageQueryResultByAttachment(ctx, GenieGetQueryResultByAttachmentRequest{ SpaceId: spaceId, @@ -250,6 +265,15 @@ func (a *GenieAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAn }) } +// Get details of a Genie Space. +// +// Get a Genie Space. +func (a *GenieAPI) GetSpaceBySpaceId(ctx context.Context, spaceId string) (*GenieSpace, error) { + return a.genieImpl.GetSpace(ctx, GenieGetSpaceRequest{ + SpaceId: spaceId, + }) +} + // Start conversation. // // Start a new conversation. diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index 0ea0af648..e7d46e03e 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -68,6 +68,16 @@ func (a *genieImpl) GetMessageQueryResultByAttachment(ctx context.Context, reque return &genieGetMessageQueryResultResponse, err } +func (a *genieImpl) GetSpace(ctx context.Context, request GenieGetSpaceRequest) (*GenieSpace, error) { + var genieSpace GenieSpace + path := fmt.Sprintf("/api/2.0/genie/spaces/%v", request.SpaceId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieSpace) + return &genieSpace, err +} + func (a *genieImpl) StartConversation(ctx context.Context, request GenieStartConversationMessageRequest) (*GenieStartConversationResponse, error) { var genieStartConversationResponse GenieStartConversationResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/start-conversation", request.SpaceId) diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index f5892cd60..0c8eb4947 100755 --- a/service/dashboards/interface.go +++ 
b/service/dashboards/interface.go @@ -15,7 +15,7 @@ type GenieService interface { // Create conversation message. // - // Create new message in [conversation](:method:genie/startconversation). + // Create new message in a [conversation](:method:genie/startconversation). // The AI response uses all previously created messages in the conversation // to respond. CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) @@ -30,20 +30,25 @@ type GenieService interface { // Get message from conversation. GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) - // Get conversation message SQL query result. + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This // is only available if a message has a query attachment and the message // status is `EXECUTING_QUERY`. GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) - // Get conversation message SQL query result by attachment id. + // Get conversation message SQL query result. // - // Get the result of SQL query by attachment id This is only available if a - // message has a query attachment and the message status is - // `EXECUTING_QUERY`. + // Get the result of SQL query if the message has a query attachment. This + // is only available if a message has a query attachment and the message + // status is `EXECUTING_QUERY` OR `COMPLETED`. GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) + // Get details of a Genie Space. + // + // Get a Genie Space. + GetSpace(ctx context.Context, request GenieGetSpaceRequest) (*GenieSpace, error) + // Start conversation. // // Start a new conversation. 
diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 563fed5fa..dd52ed9da 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -142,63 +142,6 @@ func (f *DashboardView) Type() string { return "DashboardView" } -type DataType string - -const DataTypeDataTypeArray DataType = `DATA_TYPE_ARRAY` - -const DataTypeDataTypeBigInt DataType = `DATA_TYPE_BIG_INT` - -const DataTypeDataTypeBinary DataType = `DATA_TYPE_BINARY` - -const DataTypeDataTypeBoolean DataType = `DATA_TYPE_BOOLEAN` - -const DataTypeDataTypeDate DataType = `DATA_TYPE_DATE` - -const DataTypeDataTypeDecimal DataType = `DATA_TYPE_DECIMAL` - -const DataTypeDataTypeDouble DataType = `DATA_TYPE_DOUBLE` - -const DataTypeDataTypeFloat DataType = `DATA_TYPE_FLOAT` - -const DataTypeDataTypeInt DataType = `DATA_TYPE_INT` - -const DataTypeDataTypeInterval DataType = `DATA_TYPE_INTERVAL` - -const DataTypeDataTypeMap DataType = `DATA_TYPE_MAP` - -const DataTypeDataTypeSmallInt DataType = `DATA_TYPE_SMALL_INT` - -const DataTypeDataTypeString DataType = `DATA_TYPE_STRING` - -const DataTypeDataTypeStruct DataType = `DATA_TYPE_STRUCT` - -const DataTypeDataTypeTimestamp DataType = `DATA_TYPE_TIMESTAMP` - -const DataTypeDataTypeTinyInt DataType = `DATA_TYPE_TINY_INT` - -const DataTypeDataTypeVoid DataType = `DATA_TYPE_VOID` - -// String representation for [fmt.Print] -func (f *DataType) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *DataType) Set(v string) error { - switch v { - case `DATA_TYPE_ARRAY`, `DATA_TYPE_BIG_INT`, `DATA_TYPE_BINARY`, `DATA_TYPE_BOOLEAN`, `DATA_TYPE_DATE`, `DATA_TYPE_DECIMAL`, `DATA_TYPE_DOUBLE`, `DATA_TYPE_FLOAT`, `DATA_TYPE_INT`, `DATA_TYPE_INTERVAL`, `DATA_TYPE_MAP`, `DATA_TYPE_SMALL_INT`, `DATA_TYPE_STRING`, `DATA_TYPE_STRUCT`, `DATA_TYPE_TIMESTAMP`, `DATA_TYPE_TINY_INT`, `DATA_TYPE_VOID`: - *f = DataType(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of 
"DATA_TYPE_ARRAY", "DATA_TYPE_BIG_INT", "DATA_TYPE_BINARY", "DATA_TYPE_BOOLEAN", "DATA_TYPE_DATE", "DATA_TYPE_DECIMAL", "DATA_TYPE_DOUBLE", "DATA_TYPE_FLOAT", "DATA_TYPE_INT", "DATA_TYPE_INTERVAL", "DATA_TYPE_MAP", "DATA_TYPE_SMALL_INT", "DATA_TYPE_STRING", "DATA_TYPE_STRUCT", "DATA_TYPE_TIMESTAMP", "DATA_TYPE_TINY_INT", "DATA_TYPE_VOID"`, v) - } -} - -// Type always returns DataType to satisfy [pflag.Value] interface -func (f *DataType) Type() string { - return "DataType" -} - // Delete dashboard schedule type DeleteScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. @@ -285,15 +228,30 @@ type ExecuteQueryResponse struct { // Genie AI Response type GenieAttachment struct { - Query *QueryAttachment `json:"query,omitempty"` - + // Attachment ID + AttachmentId string `json:"attachment_id,omitempty"` + // Query Attachment if Genie responds with a SQL query + Query *GenieQueryAttachment `json:"query,omitempty"` + // Text Attachment if Genie responds with text Text *TextAttachment `json:"text,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieAttachment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieAttachment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type GenieConversation struct { + // Conversation ID + ConversationId string `json:"conversation_id"` // Timestamp when the message was created CreatedTimestamp int64 `json:"created_timestamp,omitempty"` - // Conversation ID + // Conversation ID. 
Legacy identifier, use conversation_id instead Id string `json:"id"` // Timestamp when the message was last updated LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` @@ -346,7 +304,7 @@ type GenieGetConversationMessageRequest struct { SpaceId string `json:"-" url:"-"` } -// Get conversation message SQL query result +// [Deprecated] Get conversation message SQL query result type GenieGetMessageQueryResultRequest struct { // Conversation ID ConversationId string `json:"-" url:"-"` @@ -362,7 +320,7 @@ type GenieGetMessageQueryResultResponse struct { StatementResponse *sql.StatementResponse `json:"statement_response,omitempty"` } -// Get conversation message SQL query result by attachment id +// Get conversation message SQL query result type GenieGetQueryResultByAttachmentRequest struct { // Attachment ID AttachmentId string `json:"-" url:"-"` @@ -374,8 +332,14 @@ type GenieGetQueryResultByAttachmentRequest struct { SpaceId string `json:"-" url:"-"` } +// Get details of a Genie Space +type GenieGetSpaceRequest struct { + // The ID associated with the Genie space + SpaceId string `json:"-" url:"-"` +} + type GenieMessage struct { - // AI produced response to the message + // AI-generated response to the message Attachments []GenieAttachment `json:"attachments,omitempty"` // User message content Content string `json:"content"` @@ -383,34 +347,35 @@ type GenieMessage struct { ConversationId string `json:"conversation_id"` // Timestamp when the message was created CreatedTimestamp int64 `json:"created_timestamp,omitempty"` - // Error message if AI failed to respond to the message + // Error message if Genie failed to respond to the message Error *MessageError `json:"error,omitempty"` - // Message ID + // Message ID. 
Legacy identifier, use message_id instead Id string `json:"id"` // Timestamp when the message was last updated LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` - // The result of SQL query if the message has a query attachment + // Message ID + MessageId string `json:"message_id"` + // The result of SQL query if the message includes a query attachment. + // Deprecated. Use `query_result_metadata` in `GenieQueryAttachment` + // instead. QueryResult *Result `json:"query_result,omitempty"` // Genie space ID SpaceId string `json:"space_id"` - // MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching + // MessageStatus. The possible values are: * `FETCHING_METADATA`: Fetching // metadata from the data sources. * `FILTERING_CONTEXT`: Running smart // context step to determine relevant context. * `ASKING_AI`: Waiting for - // the LLM to respond to the users question. * `PENDING_WAREHOUSE`: Waiting + // the LLM to respond to the user's question. * `PENDING_WAREHOUSE`: Waiting // for warehouse before the SQL query can start executing. * - // `EXECUTING_QUERY`: Executing AI provided SQL query. Get the SQL query + // `EXECUTING_QUERY`: Executing a generated SQL query. Get the SQL query // result by calling - // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. - // **Important: The message status will stay in the `EXECUTING_QUERY` until - // a client calls - // [getMessageQueryResult](:method:genie/getMessageQueryResult)**. * - // `FAILED`: Generating a response or the executing the query failed. Please - // see `error` field. * `COMPLETED`: Message processing is completed. - // Results are in the `attachments` field. Get the SQL query result by - // calling [getMessageQueryResult](:method:genie/getMessageQueryResult) API. - // * `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL - // result is not available anymore. The user needs to execute the query - // again. 
* `CANCELLED`: Message has been cancelled. + // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * + // `FAILED`: The response generation or query execution failed. See `error` + // field. * `COMPLETED`: Message processing is completed. Results are in the + // `attachments` field. Get the SQL query result by calling + // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * + // `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL + // result is not available anymore. The user needs to rerun the query. * + // `CANCELLED`: Message has been cancelled. Status MessageStatus `json:"status,omitempty"` // ID of the user who created the message UserId int64 `json:"user_id,omitempty"` @@ -426,6 +391,67 @@ func (s GenieMessage) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type GenieQueryAttachment struct { + // Description of the query + Description string `json:"description,omitempty"` + + Id string `json:"id,omitempty"` + // Time when the user updated the query last + LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` + // AI generated SQL query + Query string `json:"query,omitempty"` + // Metadata associated with the query result. + QueryResultMetadata *GenieResultMetadata `json:"query_result_metadata,omitempty"` + // Name of the query + Title string `json:"title,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieQueryAttachment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieQueryAttachment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GenieResultMetadata struct { + // Indicates whether the result set is truncated. + IsTruncated bool `json:"is_truncated,omitempty"` + // The number of rows in the result set. 
+ RowCount int64 `json:"row_count,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieResultMetadata) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieResultMetadata) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GenieSpace struct { + // Description of the Genie Space + Description string `json:"description,omitempty"` + // Space ID + SpaceId string `json:"space_id"` + // Title of the Genie Space + Title string `json:"title"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieSpace) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieSpace) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type GenieStartConversationMessageRequest struct { // The text of the message that starts the conversation. Content string `json:"content"` @@ -673,6 +699,8 @@ const MessageErrorTypeFunctionArgumentsInvalidException MessageErrorType = `FUNC const MessageErrorTypeFunctionArgumentsInvalidJsonException MessageErrorType = `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION` +const MessageErrorTypeFunctionArgumentsInvalidTypeException MessageErrorType = `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION` + const MessageErrorTypeFunctionCallMissingParameterException MessageErrorType = `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION` const MessageErrorTypeGenericChatCompletionException MessageErrorType = `GENERIC_CHAT_COMPLETION_EXCEPTION` @@ -697,6 +725,8 @@ const MessageErrorTypeInvalidTableIdentifierException MessageErrorType = `INVALI const MessageErrorTypeLocalContextExceededException MessageErrorType = `LOCAL_CONTEXT_EXCEEDED_EXCEPTION` +const MessageErrorTypeMessageCancelledWhileExecutingException MessageErrorType = `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION` + const MessageErrorTypeMessageDeletedWhileExecutingException MessageErrorType = `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION` const MessageErrorTypeMessageUpdatedWhileExecutingException 
MessageErrorType = `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION` @@ -741,11 +771,11 @@ func (f *MessageErrorType) String() string { // Set raw string value and validate it against allowed values func (f *MessageErrorType) Set(v string) error { switch v { - case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, 
`CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: *f = MessageErrorType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", 
"FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", 
"INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) } } @@ -754,25 +784,23 @@ func (f *MessageErrorType) Type() string { return "MessageErrorType" } -// MesssageStatus. The possible values are: * `FETCHING_METADATA`: Fetching +// MessageStatus. The possible values are: * `FETCHING_METADATA`: Fetching // metadata from the data sources. * `FILTERING_CONTEXT`: Running smart context // step to determine relevant context. * `ASKING_AI`: Waiting for the LLM to -// respond to the users question. * `PENDING_WAREHOUSE`: Waiting for warehouse -// before the SQL query can start executing. * `EXECUTING_QUERY`: Executing AI -// provided SQL query. Get the SQL query result by calling -// [getMessageQueryResult](:method:genie/getMessageQueryResult) API. -// **Important: The message status will stay in the `EXECUTING_QUERY` until a -// client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**. -// * `FAILED`: Generating a response or the executing the query failed. Please -// see `error` field. * `COMPLETED`: Message processing is completed. Results -// are in the `attachments` field. Get the SQL query result by calling +// respond to the user's question. 
* `PENDING_WAREHOUSE`: Waiting for warehouse +// before the SQL query can start executing. * `EXECUTING_QUERY`: Executing a +// generated SQL query. Get the SQL query result by calling +// [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * `FAILED`: +// The response generation or query execution failed. See `error` field. * +// `COMPLETED`: Message processing is completed. Results are in the +// `attachments` field. Get the SQL query result by calling // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * // `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result -// is not available anymore. The user needs to execute the query again. * -// `CANCELLED`: Message has been cancelled. +// is not available anymore. The user needs to rerun the query. * `CANCELLED`: +// Message has been cancelled. type MessageStatus string -// Waiting for the LLM to respond to the users question. +// Waiting for the LLM to respond to the user's question. const MessageStatusAskingAi MessageStatus = `ASKING_AI` // Message has been cancelled. @@ -783,14 +811,11 @@ const MessageStatusCancelled MessageStatus = `CANCELLED` // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. const MessageStatusCompleted MessageStatus = `COMPLETED` -// Executing AI provided SQL query. Get the SQL query result by calling +// Executing a generated SQL query. Get the SQL query result by calling // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. -// **Important: The message status will stay in the `EXECUTING_QUERY` until a -// client calls [getMessageQueryResult](:method:genie/getMessageQueryResult)**. const MessageStatusExecutingQuery MessageStatus = `EXECUTING_QUERY` -// Generating a response or the executing the query failed. Please see `error` -// field. +// The response generation or query execution failed. See `error` field. const MessageStatusFailed MessageStatus = `FAILED` // Fetching metadata from the data sources. 
@@ -802,8 +827,7 @@ const MessageStatusFilteringContext MessageStatus = `FILTERING_CONTEXT` // Waiting for warehouse before the SQL query can start executing. const MessageStatusPendingWarehouse MessageStatus = `PENDING_WAREHOUSE` -// SQL result is not available anymore. The user needs to execute the query -// again. +// SQL result is not available anymore. The user needs to rerun the query. const MessageStatusQueryResultExpired MessageStatus = `QUERY_RESULT_EXPIRED` // Message has been submitted. @@ -921,38 +945,6 @@ func (s PublishedDashboard) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type QueryAttachment struct { - CachedQuerySchema *QuerySchema `json:"cached_query_schema,omitempty"` - // Description of the query - Description string `json:"description,omitempty"` - - Id string `json:"id,omitempty"` - // If the query was created on an instruction (trusted asset) we link to the - // id - InstructionId string `json:"instruction_id,omitempty"` - // Always store the title next to the id in case the original instruction - // title changes or the instruction is deleted. - InstructionTitle string `json:"instruction_title,omitempty"` - // Time when the user updated the query last - LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` - // AI generated SQL query - Query string `json:"query,omitempty"` - - StatementId string `json:"statement_id,omitempty"` - // Name of the query - Title string `json:"title,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *QueryAttachment) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s QueryAttachment) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - type QueryResponseStatus struct { // Represents an empty message, similar to google.protobuf.Empty, which is // not available in the firm right now. 
@@ -981,34 +973,6 @@ func (s QueryResponseStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type QuerySchema struct { - Columns []QuerySchemaColumn `json:"columns,omitempty"` - // Used to determine if the stored query schema is compatible with the - // latest run. The service should always clear the schema when the query is - // re-executed. - StatementId string `json:"statement_id,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *QuerySchema) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s QuerySchema) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -type QuerySchemaColumn struct { - // Populated from - // https://docs.databricks.com/sql/language-manual/sql-ref-datatypes.html - DataType DataType `json:"data_type"` - - Name string `json:"name"` - // Corresponds to type desc - TypeText string `json:"type_text"` -} - type Result struct { // If result is truncated IsTruncated bool `json:"is_truncated,omitempty"` diff --git a/service/jobs/model.go b/service/jobs/model.go index 86fb2be2c..2d9df43db 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -73,10 +73,9 @@ type BaseRun struct { // Description of the run Description string `json:"description,omitempty"` // effective_performance_target is the actual performance target used by the - // run during execution. effective_performance_target can differ from - // performance_target depending on if the job was eligible to be - // cost-optimized (e.g. contains at least 1 serverless task) or if we - // specifically override the value for the run (ex. RunNow). + // run during execution. effective_performance_target can differ from the + // client-set performance_target depending on if the job was eligible to be + // cost-optimized. EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). 
This field is set to 0 if the job is still running. @@ -428,6 +427,26 @@ func (s ClusterSpec) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Next field: 4 +type ComputeConfig struct { + // ID of the GPU pool to use. + GpuNodePoolId string `json:"gpu_node_pool_id"` + // GPU type. + GpuType string `json:"gpu_type,omitempty"` + // Number of GPUs. + NumGpus int `json:"num_gpus"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ComputeConfig) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ComputeConfig) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type Condition string const ConditionAllUpdated Condition = `ALL_UPDATED` @@ -981,6 +1000,51 @@ func (f *Format) Type() string { return "Format" } +// Next field: 9 +type GenAiComputeTask struct { + // Command launcher to run the actual script, e.g. bash, python etc. + Command string `json:"command,omitempty"` + // Next field: 4 + Compute *ComputeConfig `json:"compute,omitempty"` + // Runtime image + DlRuntimeImage string `json:"dl_runtime_image"` + // Optional string containing the name of the MLflow experiment to log the + // run to. If name is not found, backend will create the mlflow experiment + // using the name. + MlflowExperimentName string `json:"mlflow_experiment_name,omitempty"` + // Optional location type of the training script. When set to `WORKSPACE`, + // the script will be retrieved from the local Databricks workspace. When + // set to `GIT`, the script will be retrieved from a Git repository defined + // in `git_source`. If the value is empty, the task will use `GIT` if + // `git_source` is defined and `WORKSPACE` otherwise. * `WORKSPACE`: Script + // is located in Databricks workspace. * `GIT`: Script is located in cloud + // Git provider. + Source Source `json:"source,omitempty"` + // The training script file path to be executed. Cloud file URIs (such as + // dbfs:/, s3:/, adls:/, gcs:/) and workspace paths are supported.
For + // python files stored in the Databricks workspace, the path must be + // absolute and begin with `/`. For files stored in a remote repository, the + // path must be relative. This field is required. + TrainingScriptPath string `json:"training_script_path,omitempty"` + // Optional string containing model parameters passed to the training script + // in yaml format. If present, then the content in yaml_parameters_file_path + // will be ignored. + YamlParameters string `json:"yaml_parameters,omitempty"` + // Optional path to a YAML file containing model parameters passed to the + // training script. + YamlParametersFilePath string `json:"yaml_parameters_file_path,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenAiComputeTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenAiComputeTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get job permission levels type GetJobPermissionLevelsRequest struct { // The job for which to get or manage permissions. @@ -2662,10 +2726,9 @@ type Run struct { // Description of the run Description string `json:"description,omitempty"` // effective_performance_target is the actual performance target used by the - // run during execution. effective_performance_target can differ from - // performance_target depending on if the job was eligible to be - // cost-optimized (e.g. contains at least 1 serverless task) or if we - // specifically override the value for the run (ex. RunNow). + // run during execution. effective_performance_target can differ from the + // client-set performance_target depending on if the job was eligible to be + // cost-optimized. EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. 
@@ -3096,6 +3159,8 @@ const RunLifecycleStateV2StateTerminated RunLifecycleStateV2State = `TERMINATED` const RunLifecycleStateV2StateTerminating RunLifecycleStateV2State = `TERMINATING` +const RunLifecycleStateV2StateWaiting RunLifecycleStateV2State = `WAITING` + // String representation for [fmt.Print] func (f *RunLifecycleStateV2State) String() string { return string(*f) @@ -3104,11 +3169,11 @@ func (f *RunLifecycleStateV2State) String() string { // Set raw string value and validate it against allowed values func (f *RunLifecycleStateV2State) Set(v string) error { switch v { - case `BLOCKED`, `PENDING`, `QUEUED`, `RUNNING`, `TERMINATED`, `TERMINATING`: + case `BLOCKED`, `PENDING`, `QUEUED`, `RUNNING`, `TERMINATED`, `TERMINATING`, `WAITING`: *f = RunLifecycleStateV2State(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCKED", "PENDING", "QUEUED", "RUNNING", "TERMINATED", "TERMINATING"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCKED", "PENDING", "QUEUED", "RUNNING", "TERMINATED", "TERMINATING", "WAITING"`, v) } } @@ -3179,8 +3244,8 @@ type RunNow struct { // provided, all tasks in the job will be run. Only []string `json:"only,omitempty"` // PerformanceTarget defines how performant or cost efficient the execution - // of run on serverless compute should be. For RunNow request, the run will - // execute with this settings instead of ones defined in job. + // of run on serverless compute should be. For RunNow, this performance + // target will override the target defined on the job-level. PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` @@ -3547,10 +3612,9 @@ type RunTask struct { // do not execute and are immediately skipped as soon as they are unblocked. 
Disabled bool `json:"disabled,omitempty"` // effective_performance_target is the actual performance target used by the - // run during execution. effective_performance_target can differ from - // performance_target depending on if the job was eligible to be - // cost-optimized (e.g. contains at least 1 serverless task) or if an - // override was provided for the run (ex. RunNow). + // run during execution. effective_performance_target can differ from the + // client-set performance_target depending on if the job was eligible to be + // cost-optimized. EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. @@ -3578,6 +3642,8 @@ type RunTask struct { // The task executes a nested task for every input provided when the // `for_each_task` field is present. ForEachTask *RunForEachTask `json:"for_each_task,omitempty"` + // Next field: 9 + GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. Version-controlled source code is supported by // notebook, dbt, Python script, and SQL File tasks. If `git_source` is set, @@ -4267,6 +4333,8 @@ type SubmitTask struct { // The task executes a nested task for every input provided when the // `for_each_task` field is present. ForEachTask *ForEachTask `json:"for_each_task,omitempty"` + // Next field: 9 + GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` // An optional list of libraries to be installed on the cluster. The default @@ -4412,6 +4480,8 @@ type Task struct { // The task executes a nested task for every input provided when the // `for_each_task` field is present. 
ForEachTask *ForEachTask `json:"for_each_task,omitempty"` + // Next field: 9 + GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` // If job_cluster_key, this task is executed reusing the cluster specified diff --git a/service/ml/api.go b/service/ml/api.go index 47ca2f98d..ee8f4856d 100755 --- a/service/ml/api.go +++ b/service/ml/api.go @@ -19,21 +19,21 @@ type ExperimentsInterface interface { // already exist and fails if another experiment with the same name already // exists. // - // Throws `RESOURCE_ALREADY_EXISTS` if a experiment with the given name exists. + // Throws `RESOURCE_ALREADY_EXISTS` if an experiment with the given name exists. CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error) // Create a run. // // Creates a new run within an experiment. A run is usually a single execution // of a machine learning or data ETL pipeline. MLflow uses runs to track the - // `mlflowParam`, `mlflowMetric` and `mlflowRunTag` associated with a single + // `mlflowParam`, `mlflowMetric`, and `mlflowRunTag` associated with a single // execution. CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error) // Delete an experiment. // // Marks an experiment and associated metadata, runs, metrics, params, and tags - // for deletion. If the experiment uses FileStore, artifacts associated with + // for deletion. If the experiment uses FileStore, artifacts associated with the // experiment are also deleted. DeleteExperiment(ctx context.Context, request DeleteExperiment) error @@ -47,16 +47,15 @@ type ExperimentsInterface interface { // Bulk delete runs in an experiment that were created prior to or at the // specified timestamp. Deletes at most max_runs per request. 
To call this API // from a Databricks Notebook in Python, you can use the client code snippet on - // https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error) - // Delete a tag. + // Delete a tag on a run. // // Deletes a tag on a run. Tags are run metadata that can be updated during a // run and after a run completes. DeleteTag(ctx context.Context, request DeleteTag) error - // Get metadata. + // Get an experiment by name. // // Gets metadata for an experiment. // @@ -67,21 +66,21 @@ type ExperimentsInterface interface { // // Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name // exists. - GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) + GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error) // Get an experiment. // // Gets metadata for an experiment. This method works on deleted experiments. GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) - // Get history of a given metric within a run. + // Get metric history for a run. // // Gets a list of all values for the specified metric for a given run. // // This method is generated by Databricks SDK Code Generator. GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] - // Get history of a given metric within a run. + // Get metric history for a run. // // Gets a list of all values for the specified metric for a given run. // @@ -120,11 +119,10 @@ type ExperimentsInterface interface { // these values. GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error) - // Get all artifacts. + // List artifacts. // - // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is - // specified, the response contains only artifacts with the specified prefix. 
- // This API does not support pagination when listing artifacts in UC Volumes. A + // List artifacts for a run. Takes an optional `artifact_path` prefix which if + // specified, the response contains only artifacts with the specified prefix. A // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files @@ -133,11 +131,10 @@ type ExperimentsInterface interface { // This method is generated by Databricks SDK Code Generator. ListArtifacts(ctx context.Context, request ListArtifactsRequest) listing.Iterator[FileInfo] - // Get all artifacts. + // List artifacts. // - // List artifacts for a run. Takes an optional `artifact_path` prefix. If it is - // specified, the response contains only artifacts with the specified prefix. - // This API does not support pagination when listing artifacts in UC Volumes. A + // List artifacts for a run. Takes an optional `artifact_path` prefix which if + // specified, the response contains only artifacts with the specified prefix. A // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files @@ -160,7 +157,7 @@ type ExperimentsInterface interface { // This method is generated by Databricks SDK Code Generator. ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) - // Log a batch. + // Log a batch of metrics/params/tags for a run. // // Logs a batch of metrics, params, and tags for a run. If any data failed to be // persisted, the server will respond with an error (non-200 status code). 
@@ -189,8 +186,13 @@ type ExperimentsInterface interface { // Request Limits ------------------------------- A single JSON-serialized API // request may be up to 1 MB in size and contain: // - // * No more than 1000 metrics, params, and tags in total * Up to 1000 metrics * - // Up to 100 params * Up to 100 tags + // * No more than 1000 metrics, params, and tags in total + // + // * Up to 1000 metrics + // + // * Up to 100 params + // + // * Up to 100 tags // // For example, a valid request might contain 900 metrics, 50 params, and 50 // tags, but logging 900 metrics, 50 params, and 51 tags is invalid. @@ -198,6 +200,7 @@ type ExperimentsInterface interface { // The following limits also apply to metric, param, and tag keys and values: // // * Metric keys, param keys, and tag keys can be up to 250 characters in length + // // * Parameter and tag values can be up to 250 characters in length LogBatch(ctx context.Context, request LogBatch) error @@ -205,11 +208,13 @@ type ExperimentsInterface interface { // // **NOTE:** Experimental: This API may change or be removed in a future release // without warning. + // + // Logs inputs, such as datasets and models, to an MLflow Run. LogInputs(ctx context.Context, request LogInputs) error - // Log a metric. + // Log a metric for a run. // - // Logs a metric for a run. A metric is a key-value pair (string key, float + // Log a metric for a run. A metric is a key-value pair (string key, float // value) with an associated timestamp. Examples include the various metrics // that represent ML model accuracy. A metric can be logged multiple times. LogMetric(ctx context.Context, request LogMetric) error @@ -220,7 +225,7 @@ type ExperimentsInterface interface { // without warning. LogModel(ctx context.Context, request LogModel) error - // Log a param. + // Log a param for a run. // // Logs a param used for a run. A param is a key-value pair (string key, string // value). 
Examples include hyperparameters used for ML model training and @@ -228,7 +233,7 @@ type ExperimentsInterface interface { // once for a run. LogParam(ctx context.Context, request LogParam) error - // Restores an experiment. + // Restore an experiment. // // Restore an experiment marked for deletion. This also restores associated // metadata, runs, metrics, params, and tags. If experiment uses FileStore, @@ -240,7 +245,11 @@ type ExperimentsInterface interface { // Restore a run. // - // Restores a deleted run. + // Restores a deleted run. This also restores associated metadata, runs, + // metrics, params, and tags. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if the run was never created or was + // permanently deleted. RestoreRun(ctx context.Context, request RestoreRun) error // Restore runs by deletion time. @@ -248,7 +257,6 @@ type ExperimentsInterface interface { // Bulk restore runs in an experiment that were deleted no earlier than the // specified timestamp. Restores at most max_runs per request. To call this API // from a Databricks Notebook in Python, you can use the client code snippet on - // https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error) // Search experiments. @@ -269,7 +277,7 @@ type ExperimentsInterface interface { // // Searches for runs that satisfy expressions. // - // Search expressions can use `mlflowMetric` and `mlflowParam` keys.", + // Search expressions can use `mlflowMetric` and `mlflowParam` keys. // // This method is generated by Databricks SDK Code Generator. SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] @@ -278,12 +286,12 @@ type ExperimentsInterface interface { // // Searches for runs that satisfy expressions. // - // Search expressions can use `mlflowMetric` and `mlflowParam` keys.", + // Search expressions can use `mlflowMetric` and `mlflowParam` keys. 
// // This method is generated by Databricks SDK Code Generator. SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) - // Set a tag. + // Set a tag for an experiment. // // Sets a tag on an experiment. Experiment tags are metadata that can be // updated. @@ -296,7 +304,7 @@ type ExperimentsInterface interface { // permissions from their root object. SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) - // Set a tag. + // Set a tag for a run. // // Sets a tag on a run. Tags are run metadata that can be updated during a run // and after a run completes. diff --git a/service/ml/impl.go b/service/ml/impl.go index fcf826960..974feeb5a 100755 --- a/service/ml/impl.go +++ b/service/ml/impl.go @@ -83,14 +83,14 @@ func (a *experimentsImpl) DeleteTag(ctx context.Context, request DeleteTag) erro return err } -func (a *experimentsImpl) GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) { - var getExperimentResponse GetExperimentResponse +func (a *experimentsImpl) GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error) { + var getExperimentByNameResponse GetExperimentByNameResponse path := "/api/2.0/mlflow/experiments/get-by-name" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentResponse) - return &getExperimentResponse, err + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getExperimentByNameResponse) + return &getExperimentByNameResponse, err } func (a *experimentsImpl) GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) { @@ -103,7 +103,7 @@ func (a *experimentsImpl) GetExperiment(ctx context.Context, request GetExperime return &getExperimentResponse, err } -// Get history of a given metric within a run. 
+// Get metric history for a run. // // Gets a list of all values for the specified metric for a given run. func (a *experimentsImpl) GetHistory(ctx context.Context, request GetHistoryRequest) listing.Iterator[Metric] { @@ -130,13 +130,12 @@ func (a *experimentsImpl) GetHistory(ctx context.Context, request GetHistoryRequ return iterator } -// Get history of a given metric within a run. +// Get metric history for a run. // // Gets a list of all values for the specified metric for a given run. func (a *experimentsImpl) GetHistoryAll(ctx context.Context, request GetHistoryRequest) ([]Metric, error) { iterator := a.GetHistory(ctx, request) - return listing.ToSliceN[Metric, int](ctx, iterator, request.MaxResults) - + return listing.ToSlice[Metric](ctx, iterator) } func (a *experimentsImpl) internalGetHistory(ctx context.Context, request GetHistoryRequest) (*GetMetricHistoryResponse, error) { @@ -179,11 +178,10 @@ func (a *experimentsImpl) GetRun(ctx context.Context, request GetRunRequest) (*G return &getRunResponse, err } -// Get all artifacts. +// List artifacts. // -// List artifacts for a run. Takes an optional `artifact_path` prefix. If it is -// specified, the response contains only artifacts with the specified prefix. -// This API does not support pagination when listing artifacts in UC Volumes. A +// List artifacts for a run. Takes an optional `artifact_path` prefix which if +// specified, the response contains only artifacts with the specified prefix. A // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files @@ -212,11 +210,10 @@ func (a *experimentsImpl) ListArtifacts(ctx context.Context, request ListArtifac return iterator } -// Get all artifacts. +// List artifacts. // -// List artifacts for a run. Takes an optional `artifact_path` prefix. 
If it is -// specified, the response contains only artifacts with the specified prefix. -// This API does not support pagination when listing artifacts in UC Volumes. A +// List artifacts for a run. Takes an optional `artifact_path` prefix which if +// specified, the response contains only artifacts with the specified prefix. A // maximum of 1000 artifacts will be retrieved for UC Volumes. Please call // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files @@ -268,8 +265,7 @@ func (a *experimentsImpl) ListExperiments(ctx context.Context, request ListExper // Gets a list of all experiments. func (a *experimentsImpl) ListExperimentsAll(ctx context.Context, request ListExperimentsRequest) ([]Experiment, error) { iterator := a.ListExperiments(ctx, request) - return listing.ToSliceN[Experiment, int](ctx, iterator, request.MaxResults) - + return listing.ToSlice[Experiment](ctx, iterator) } func (a *experimentsImpl) internalListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) { @@ -420,7 +416,7 @@ func (a *experimentsImpl) internalSearchExperiments(ctx context.Context, request // // Searches for runs that satisfy expressions. // -// Search expressions can use `mlflowMetric` and `mlflowParam` keys.", +// Search expressions can use `mlflowMetric` and `mlflowParam` keys. func (a *experimentsImpl) SearchRuns(ctx context.Context, request SearchRuns) listing.Iterator[Run] { getNextPage := func(ctx context.Context, req SearchRuns) (*SearchRunsResponse, error) { @@ -449,7 +445,7 @@ func (a *experimentsImpl) SearchRuns(ctx context.Context, request SearchRuns) li // // Searches for runs that satisfy expressions. // -// Search expressions can use `mlflowMetric` and `mlflowParam` keys.", +// Search expressions can use `mlflowMetric` and `mlflowParam` keys. 
func (a *experimentsImpl) SearchRunsAll(ctx context.Context, request SearchRuns) ([]Run, error) { iterator := a.SearchRuns(ctx, request) return listing.ToSlice[Run](ctx, iterator) diff --git a/service/ml/interface.go b/service/ml/interface.go index 5e43a0021..53f2f3f0b 100755 --- a/service/ml/interface.go +++ b/service/ml/interface.go @@ -24,7 +24,7 @@ type ExperimentsService interface { // already exist and fails if another experiment with the same name already // exists. // - // Throws `RESOURCE_ALREADY_EXISTS` if a experiment with the given name + // Throws `RESOURCE_ALREADY_EXISTS` if an experiment with the given name // exists. CreateExperiment(ctx context.Context, request CreateExperiment) (*CreateExperimentResponse, error) @@ -32,7 +32,7 @@ type ExperimentsService interface { // // Creates a new run within an experiment. A run is usually a single // execution of a machine learning or data ETL pipeline. MLflow uses runs to - // track the `mlflowParam`, `mlflowMetric` and `mlflowRunTag` associated + // track the `mlflowParam`, `mlflowMetric`, and `mlflowRunTag` associated // with a single execution. CreateRun(ctx context.Context, request CreateRun) (*CreateRunResponse, error) @@ -40,7 +40,7 @@ type ExperimentsService interface { // // Marks an experiment and associated metadata, runs, metrics, params, and // tags for deletion. If the experiment uses FileStore, artifacts associated - // with experiment are also deleted. + // with the experiment are also deleted. DeleteExperiment(ctx context.Context, request DeleteExperiment) error // Delete a run. @@ -54,16 +54,15 @@ type ExperimentsService interface { // specified timestamp. Deletes at most max_runs per request. To call this // API from a Databricks Notebook in Python, you can use the client code // snippet on - // https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-delete. DeleteRuns(ctx context.Context, request DeleteRuns) (*DeleteRunsResponse, error) - // Delete a tag. 
+ // Delete a tag on a run. // // Deletes a tag on a run. Tags are run metadata that can be updated during // a run and after a run completes. DeleteTag(ctx context.Context, request DeleteTag) error - // Get metadata. + // Get an experiment by name. // // Gets metadata for an experiment. // @@ -74,7 +73,7 @@ type ExperimentsService interface { // // Throws `RESOURCE_DOES_NOT_EXIST` if no experiment with the specified name // exists. - GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentResponse, error) + GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error) // Get an experiment. // @@ -82,7 +81,7 @@ type ExperimentsService interface { // experiments. GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) - // Get history of a given metric within a run. + // Get metric history for a run. // // Gets a list of all values for the specified metric for a given run. // @@ -110,12 +109,11 @@ type ExperimentsService interface { // maximum of these values. GetRun(ctx context.Context, request GetRunRequest) (*GetRunResponse, error) - // Get all artifacts. + // List artifacts. // - // List artifacts for a run. Takes an optional `artifact_path` prefix. If it - // is specified, the response contains only artifacts with the specified - // prefix. This API does not support pagination when listing artifacts in UC - // Volumes. A maximum of 1000 artifacts will be retrieved for UC Volumes. + // List artifacts for a run. Takes an optional `artifact_path` prefix which + // if specified, the response contains only artifacts with the specified + // prefix. A maximum of 1000 artifacts will be retrieved for UC Volumes. // Please call `/api/2.0/fs/directories{directory_path}` for listing // artifacts in UC Volumes, which supports pagination. See [List directory // contents | Files API](/api/workspace/files/listdirectorycontents). 
@@ -130,7 +128,7 @@ type ExperimentsService interface { // Use ListExperimentsAll() to get all Experiment instances, which will iterate over every result page. ListExperiments(ctx context.Context, request ListExperimentsRequest) (*ListExperimentsResponse, error) - // Log a batch. + // Log a batch of metrics/params/tags for a run. // // Logs a batch of metrics, params, and tags for a run. If any data failed // to be persisted, the server will respond with an error (non-200 status @@ -163,8 +161,13 @@ type ExperimentsService interface { // Request Limits ------------------------------- A single JSON-serialized // API request may be up to 1 MB in size and contain: // - // * No more than 1000 metrics, params, and tags in total * Up to 1000 - // metrics * Up to 100 params * Up to 100 tags + // * No more than 1000 metrics, params, and tags in total + // + // * Up to 1000 metrics + // + // * Up to 100 params + // + // * Up to 100 tags // // For example, a valid request might contain 900 metrics, 50 params, and 50 // tags, but logging 900 metrics, 50 params, and 51 tags is invalid. @@ -173,18 +176,22 @@ type ExperimentsService interface { // values: // // * Metric keys, param keys, and tag keys can be up to 250 characters in - // length * Parameter and tag values can be up to 250 characters in length + // length + // + // * Parameter and tag values can be up to 250 characters in length LogBatch(ctx context.Context, request LogBatch) error // Log inputs to a run. // // **NOTE:** Experimental: This API may change or be removed in a future // release without warning. + // + // Logs inputs, such as datasets and models, to an MLflow Run. LogInputs(ctx context.Context, request LogInputs) error - // Log a metric. + // Log a metric for a run. // - // Logs a metric for a run. A metric is a key-value pair (string key, float + // Log a metric for a run. A metric is a key-value pair (string key, float // value) with an associated timestamp. 
Examples include the various metrics // that represent ML model accuracy. A metric can be logged multiple times. LogMetric(ctx context.Context, request LogMetric) error @@ -195,7 +202,7 @@ type ExperimentsService interface { // release without warning. LogModel(ctx context.Context, request LogModel) error - // Log a param. + // Log a param for a run. // // Logs a param used for a run. A param is a key-value pair (string key, // string value). Examples include hyperparameters used for ML model @@ -203,7 +210,7 @@ type ExperimentsService interface { // can be logged only once for a run. LogParam(ctx context.Context, request LogParam) error - // Restores an experiment. + // Restore an experiment. // // Restore an experiment marked for deletion. This also restores associated // metadata, runs, metrics, params, and tags. If experiment uses FileStore, @@ -215,7 +222,11 @@ type ExperimentsService interface { // Restore a run. // - // Restores a deleted run. + // Restores a deleted run. This also restores associated metadata, runs, + // metrics, params, and tags. + // + // Throws `RESOURCE_DOES_NOT_EXIST` if the run was never created or was + // permanently deleted. RestoreRun(ctx context.Context, request RestoreRun) error // Restore runs by deletion time. @@ -224,7 +235,6 @@ type ExperimentsService interface { // specified timestamp. Restores at most max_runs per request. To call this // API from a Databricks Notebook in Python, you can use the client code // snippet on - // https://learn.microsoft.com/en-us/azure/databricks/mlflow/runs#bulk-restore. RestoreRuns(ctx context.Context, request RestoreRuns) (*RestoreRunsResponse, error) // Search experiments. @@ -238,12 +248,12 @@ type ExperimentsService interface { // // Searches for runs that satisfy expressions. // - // Search expressions can use `mlflowMetric` and `mlflowParam` keys.", + // Search expressions can use `mlflowMetric` and `mlflowParam` keys. 
// // Use SearchRunsAll() to get all Run instances, which will iterate over every result page. SearchRuns(ctx context.Context, request SearchRuns) (*SearchRunsResponse, error) - // Set a tag. + // Set a tag for an experiment. // // Sets a tag on an experiment. Experiment tags are metadata that can be // updated. @@ -256,7 +266,7 @@ type ExperimentsService interface { // inherit permissions from their root object. SetPermissions(ctx context.Context, request ExperimentPermissionsRequest) (*ExperimentPermissions, error) - // Set a tag. + // Set a tag for a run. // // Sets a tag on a run. Tags are run metadata that can be updated during a // run and after a run completes. diff --git a/service/ml/model.go b/service/ml/model.go index 139a34cc8..04901af1b 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -441,6 +441,8 @@ func (s CreateRegistryWebhook) MarshalJSON() ([]byte, error) { type CreateRun struct { // ID of the associated experiment. ExperimentId string `json:"experiment_id,omitempty"` + // The name of the run. + RunName string `json:"run_name,omitempty"` // Unix timestamp in milliseconds of when the run started. StartTime int64 `json:"start_time,omitempty"` // Additional metadata for run. @@ -504,13 +506,15 @@ type CreateWebhookResponse struct { Webhook *RegistryWebhook `json:"webhook,omitempty"` } +// Dataset. Represents a reference to data used for training, testing, or +// evaluation during the model development process. type Dataset struct { // Dataset digest, e.g. an md5 hash of the dataset that uniquely identifies // it within datasets of the same name. - Digest string `json:"digest,omitempty"` + Digest string `json:"digest"` // The name of the dataset. E.g. “my.uc.table@2” “nyc-taxi-dataset”, // “fantastic-elk-3” - Name string `json:"name,omitempty"` + Name string `json:"name"` // The profile of the dataset. 
Summary statistics for the dataset, such as // the number of rows in a table, the mean / std / mode of each column in a // table, or the number of elements in an array. @@ -518,13 +522,13 @@ type Dataset struct { // The schema of the dataset. E.g., MLflow ColSpec JSON for a dataframe, // MLflow TensorSpec JSON for an ndarray, or another schema format. Schema string `json:"schema,omitempty"` - // The type of the dataset source, e.g. ‘databricks-uc-table’, - // ‘DBFS’, ‘S3’, ... - Source string `json:"source,omitempty"` // Source information for the dataset. Note that the source may not exactly // reproduce the dataset if it was transformed / modified before use with // MLflow. - SourceType string `json:"source_type,omitempty"` + Source string `json:"source"` + // The type of the dataset source, e.g. ‘databricks-uc-table’, + // ‘DBFS’, ‘S3’, ... + SourceType string `json:"source_type"` ForceSendFields []string `json:"-" url:"-"` } @@ -537,9 +541,10 @@ func (s Dataset) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// DatasetInput. Represents a dataset and input tags. type DatasetInput struct { // The dataset being used as a Run input. - Dataset *Dataset `json:"dataset,omitempty"` + Dataset Dataset `json:"dataset"` // A list of tags for the dataset input, e.g. a “context” tag with value // “training” Tags []InputTag `json:"tags,omitempty"` @@ -749,6 +754,7 @@ func (s DeleteWebhookRequest) MarshalJSON() ([]byte, error) { type DeleteWebhookResponse struct { } +// An experiment and its metadata. type Experiment struct { // Location where artifacts for the experiment are stored. ArtifactLocation string `json:"artifact_location,omitempty"` @@ -909,6 +915,7 @@ type ExperimentPermissionsRequest struct { ExperimentId string `json:"-" url:"-"` } +// A tag for an experiment. type ExperimentTag struct { // The tag key. 
Key string `json:"key,omitempty"` @@ -926,6 +933,7 @@ func (s ExperimentTag) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Metadata of a single artifact file or directory. type FileInfo struct { // Size in bytes. Unset for directories. FileSize int64 `json:"file_size,omitempty"` @@ -945,12 +953,17 @@ func (s FileInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Get metadata +// Get an experiment by name type GetByNameRequest struct { // Name of the associated experiment. ExperimentName string `json:"-" url:"experiment_name"` } +type GetExperimentByNameResponse struct { + // Experiment details. + Experiment *Experiment `json:"experiment,omitempty"` +} + // Get experiment permission levels type GetExperimentPermissionLevelsRequest struct { // The experiment for which to get or manage permissions. @@ -979,7 +992,7 @@ type GetExperimentResponse struct { Experiment *Experiment `json:"experiment,omitempty"` } -// Get history of a given metric within a run +// Get metric history for a run type GetHistoryRequest struct { // Maximum number of Metric records to return per paginated request. Default // is set to 25,000. If set higher than 25,000, a request Exception will be @@ -991,8 +1004,8 @@ type GetHistoryRequest struct { PageToken string `json:"-" url:"page_token,omitempty"` // ID of the run from which to fetch metric values. Must be provided. RunId string `json:"-" url:"run_id,omitempty"` - // [Deprecated, use run_id instead] ID of the run from which to fetch metric - // values. This field will be removed in a future MLflow version. + // [Deprecated, use `run_id` instead] ID of the run from which to fetch + // metric values. This field will be removed in a future MLflow version. RunUuid string `json:"-" url:"run_uuid,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1021,10 +1034,14 @@ type GetLatestVersionsResponse struct { } type GetMetricHistoryResponse struct { - // All logged values for this metric. 
+ // All logged values for this metric if `max_results` is not specified in + // the request or if the total count of metrics returned is less than the + // service level pagination threshold. Otherwise, this is one page of + // results. Metrics []Metric `json:"metrics,omitempty"` - // Token that can be used to retrieve the next page of metric history - // results + // A token that can be used to issue a query for the next page of metric + // history values. A missing token indicates that no additional metrics are + // available to fetch. NextPageToken string `json:"next_page_token,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1104,8 +1121,8 @@ type GetRegisteredModelPermissionsRequest struct { type GetRunRequest struct { // ID of the run to fetch. Must be provided. RunId string `json:"-" url:"run_id"` - // [Deprecated, use run_id instead] ID of the run to fetch. This field will - // be removed in a future MLflow version. + // [Deprecated, use `run_id` instead] ID of the run to fetch. This field + // will be removed in a future MLflow version. RunUuid string `json:"-" url:"run_uuid,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1180,21 +1197,12 @@ func (s HttpUrlSpecWithoutSecret) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Tag for a dataset input. type InputTag struct { // The tag key. - Key string `json:"key,omitempty"` + Key string `json:"key"` // The tag value. - Value string `json:"value,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *InputTag) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s InputTag) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) + Value string `json:"value"` } type JobSpec struct { @@ -1237,7 +1245,7 @@ func (s JobSpecWithoutSecret) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Get all artifacts +// List artifacts type ListArtifactsRequest struct { // Token indicating the page of artifact results to fetch. 
`page_token` is // not supported when listing artifacts in UC Volumes. A maximum of 1000 @@ -1251,7 +1259,7 @@ type ListArtifactsRequest struct { Path string `json:"-" url:"path,omitempty"` // ID of the run whose artifacts to list. Must be provided. RunId string `json:"-" url:"run_id,omitempty"` - // [Deprecated, use run_id instead] ID of the run whose artifacts to list. + // [Deprecated, use `run_id` instead] ID of the run whose artifacts to list. // This field will be removed in a future MLflow version. RunUuid string `json:"-" url:"run_uuid,omitempty"` @@ -1292,12 +1300,12 @@ type ListExperimentsRequest struct { // automatically capped at 1000. Callers of this endpoint are encouraged to // pass max_results explicitly and leverage page_token to iterate through // experiments. - MaxResults int `json:"-" url:"max_results,omitempty"` + MaxResults int64 `json:"-" url:"max_results,omitempty"` // Token indicating the page of experiments to fetch PageToken string `json:"-" url:"page_token,omitempty"` // Qualifier for type of experiments to be returned. If unspecified, return // only active experiments. - ViewType string `json:"-" url:"view_type,omitempty"` + ViewType ViewType `json:"-" url:"view_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -1448,17 +1456,7 @@ type LogInputs struct { // Dataset inputs Datasets []DatasetInput `json:"datasets,omitempty"` // ID of the run to log under - RunId string `json:"run_id,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *LogInputs) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s LogInputs) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) + RunId string `json:"run_id"` } type LogInputsResponse struct { @@ -1469,7 +1467,7 @@ type LogMetric struct { Key string `json:"key"` // ID of the run under which to log the metric. Must be provided. 
RunId string `json:"run_id,omitempty"` - // [Deprecated, use run_id instead] ID of the run under which to log the + // [Deprecated, use `run_id` instead] ID of the run under which to log the // metric. This field will be removed in a future MLflow version. RunUuid string `json:"run_uuid,omitempty"` // Step at which to log the metric @@ -1518,7 +1516,7 @@ type LogParam struct { Key string `json:"key"` // ID of the run under which to log the param. Must be provided. RunId string `json:"run_id,omitempty"` - // [Deprecated, use run_id instead] ID of the run under which to log the + // [Deprecated, use `run_id` instead] ID of the run under which to log the // param. This field will be removed in a future MLflow version. RunUuid string `json:"run_uuid,omitempty"` // String value of the param being logged. Maximum size is 500 bytes. @@ -1538,6 +1536,7 @@ func (s LogParam) MarshalJSON() ([]byte, error) { type LogParamResponse struct { } +// Metric associated with a run, represented as a key-value pair. type Metric struct { // Key identifying this metric. Key string `json:"key,omitempty"` @@ -1787,6 +1786,7 @@ func (s ModelVersionTag) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Param associated with a run. type Param struct { // Key identifying this param. Key string `json:"key,omitempty"` @@ -2241,6 +2241,7 @@ func (s RestoreRunsResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// A single run. type Run struct { // Run data. Data *RunData `json:"data,omitempty"` @@ -2250,6 +2251,7 @@ type Run struct { Inputs *RunInputs `json:"inputs,omitempty"` } +// Run data (metrics, params, and tags). type RunData struct { // Run metrics. Metrics []Metric `json:"metrics,omitempty"` @@ -2259,11 +2261,12 @@ type RunData struct { Tags []RunTag `json:"tags,omitempty"` } +// Metadata of a single run. type RunInfo struct { // URI of the directory where artifacts should be uploaded. 
This can be a // local path (starting with "/"), or a distributed file system (DFS) path, - // like `s3://bucket/directory` or `dbfs:/my/directory`. If not set, the - // local `./mlruns` directory is chosen. + // like ``s3://bucket/directory`` or ``dbfs:/my/directory``. If not set, the + // local ``./mlruns`` directory is chosen. ArtifactUri string `json:"artifact_uri,omitempty"` // Unix timestamp of when the run ended in milliseconds. EndTime int64 `json:"end_time,omitempty"` @@ -2273,6 +2276,8 @@ type RunInfo struct { LifecycleStage string `json:"lifecycle_stage,omitempty"` // Unique identifier for the run. RunId string `json:"run_id,omitempty"` + // The name of the run. + RunName string `json:"run_name,omitempty"` // [Deprecated, use run_id instead] Unique identifier for the run. This // field will be removed in a future MLflow version. RunUuid string `json:"run_uuid,omitempty"` @@ -2296,7 +2301,7 @@ func (s RunInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Current status of the run. +// Status of a run. type RunInfoStatus string const RunInfoStatusFailed RunInfoStatus = `FAILED` @@ -2330,11 +2335,13 @@ func (f *RunInfoStatus) Type() string { return "RunInfoStatus" } +// Run inputs. type RunInputs struct { // Run metrics. DatasetInputs []DatasetInput `json:"dataset_inputs,omitempty"` } +// Tag for a run. type RunTag struct { // The tag key. Key string `json:"key,omitempty"` @@ -2367,7 +2374,7 @@ type SearchExperiments struct { PageToken string `json:"page_token,omitempty"` // Qualifier for type of experiments to be returned. If unspecified, return // only active experiments. - ViewType SearchExperimentsViewType `json:"view_type,omitempty"` + ViewType ViewType `json:"view_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -2398,37 +2405,6 @@ func (s SearchExperimentsResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Qualifier for type of experiments to be returned. 
If unspecified, return only -// active experiments. -type SearchExperimentsViewType string - -const SearchExperimentsViewTypeActiveOnly SearchExperimentsViewType = `ACTIVE_ONLY` - -const SearchExperimentsViewTypeAll SearchExperimentsViewType = `ALL` - -const SearchExperimentsViewTypeDeletedOnly SearchExperimentsViewType = `DELETED_ONLY` - -// String representation for [fmt.Print] -func (f *SearchExperimentsViewType) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *SearchExperimentsViewType) Set(v string) error { - switch v { - case `ACTIVE_ONLY`, `ALL`, `DELETED_ONLY`: - *f = SearchExperimentsViewType(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "ACTIVE_ONLY", "ALL", "DELETED_ONLY"`, v) - } -} - -// Type always returns SearchExperimentsViewType to satisfy [pflag.Value] interface -func (f *SearchExperimentsViewType) Type() string { - return "SearchExperimentsViewType" -} - // Searches model versions type SearchModelVersionsRequest struct { // String filter condition, like "name='my-model-name'". Must be a single @@ -2535,17 +2511,17 @@ type SearchRuns struct { // Maximum number of runs desired. Max threshold is 50000 MaxResults int `json:"max_results,omitempty"` // List of columns to be ordered by, including attributes, params, metrics, - // and tags with an optional "DESC" or "ASC" annotation, where "ASC" is the - // default. Example: ["params.input DESC", "metrics.alpha ASC", - // "metrics.rmse"] Tiebreaks are done by start_time DESC followed by run_id - // for runs with the same start time (and this is the default ordering - // criterion if order_by is not provided). + // and tags with an optional `"DESC"` or `"ASC"` annotation, where `"ASC"` + // is the default. Example: `["params.input DESC", "metrics.alpha ASC", + // "metrics.rmse"]`. 
Tiebreaks are done by start_time `DESC` followed by + // `run_id` for runs with the same start time (and this is the default + // ordering criterion if order_by is not provided). OrderBy []string `json:"order_by,omitempty"` // Token for the current page of runs. PageToken string `json:"page_token,omitempty"` // Whether to display only active, only deleted, or all runs. Defaults to // only active runs. - RunViewType SearchRunsRunViewType `json:"run_view_type,omitempty"` + RunViewType ViewType `json:"run_view_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -2575,46 +2551,13 @@ func (s SearchRunsResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Whether to display only active, only deleted, or all runs. Defaults to only -// active runs. -type SearchRunsRunViewType string - -const SearchRunsRunViewTypeActiveOnly SearchRunsRunViewType = `ACTIVE_ONLY` - -const SearchRunsRunViewTypeAll SearchRunsRunViewType = `ALL` - -const SearchRunsRunViewTypeDeletedOnly SearchRunsRunViewType = `DELETED_ONLY` - -// String representation for [fmt.Print] -func (f *SearchRunsRunViewType) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *SearchRunsRunViewType) Set(v string) error { - switch v { - case `ACTIVE_ONLY`, `ALL`, `DELETED_ONLY`: - *f = SearchRunsRunViewType(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "ACTIVE_ONLY", "ALL", "DELETED_ONLY"`, v) - } -} - -// Type always returns SearchRunsRunViewType to satisfy [pflag.Value] interface -func (f *SearchRunsRunViewType) Type() string { - return "SearchRunsRunViewType" -} - type SetExperimentTag struct { // ID of the experiment under which to log the tag. Must be provided. ExperimentId string `json:"experiment_id"` - // Name of the tag. Maximum size depends on storage backend. All storage - // backends are guaranteed to support key values up to 250 bytes in size. + // Name of the tag. 
Keys up to 250 bytes in size are supported. Key string `json:"key"` - // String value of the tag being logged. Maximum size depends on storage - // backend. All storage backends are guaranteed to support key values up to - // 5000 bytes in size. + // String value of the tag being logged. Values up to 64KB in size are + // supported. Value string `json:"value"` } @@ -2658,17 +2601,15 @@ type SetModelVersionTagResponse struct { } type SetTag struct { - // Name of the tag. Maximum size depends on storage backend. All storage - // backends are guaranteed to support key values up to 250 bytes in size. + // Name of the tag. Keys up to 250 bytes in size are supported. Key string `json:"key"` // ID of the run under which to log the tag. Must be provided. RunId string `json:"run_id,omitempty"` - // [Deprecated, use run_id instead] ID of the run under which to log the + // [Deprecated, use `run_id` instead] ID of the run under which to log the // tag. This field will be removed in a future MLflow version. RunUuid string `json:"run_uuid,omitempty"` - // String value of the tag being logged. Maximum size depends on storage - // backend. All storage backends are guaranteed to support key values up to - // 5000 bytes in size. + // String value of the tag being logged. Values up to 64KB in size are + // supported. Value string `json:"value"` ForceSendFields []string `json:"-" url:"-"` @@ -3014,7 +2955,9 @@ type UpdateRun struct { EndTime int64 `json:"end_time,omitempty"` // ID of the run to update. Must be provided. RunId string `json:"run_id,omitempty"` - // [Deprecated, use run_id instead] ID of the run to update.. This field + // Updated name of the run. + RunName string `json:"run_name,omitempty"` + // [Deprecated, use `run_id` instead] ID of the run to update. This field // will be removed in a future MLflow version. RunUuid string `json:"run_uuid,omitempty"` // Updated status of the run. 
@@ -3036,7 +2979,7 @@ type UpdateRunResponse struct { RunInfo *RunInfo `json:"run_info,omitempty"` } -// Updated status of the run. +// Status of a run. type UpdateRunStatus string const UpdateRunStatusFailed UpdateRunStatus = `FAILED` @@ -3072,3 +3015,33 @@ func (f *UpdateRunStatus) Type() string { type UpdateWebhookResponse struct { } + +// Qualifier for the view type. +type ViewType string + +const ViewTypeActiveOnly ViewType = `ACTIVE_ONLY` + +const ViewTypeAll ViewType = `ALL` + +const ViewTypeDeletedOnly ViewType = `DELETED_ONLY` + +// String representation for [fmt.Print] +func (f *ViewType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ViewType) Set(v string) error { + switch v { + case `ACTIVE_ONLY`, `ALL`, `DELETED_ONLY`: + *f = ViewType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ACTIVE_ONLY", "ALL", "DELETED_ONLY"`, v) + } +} + +// Type always returns ViewType to satisfy [pflag.Value] interface +func (f *ViewType) Type() string { + return "ViewType" +} diff --git a/service/oauth2/impl.go b/service/oauth2/impl.go index b16f7b7a1..30c7f1069 100755 --- a/service/oauth2/impl.go +++ b/service/oauth2/impl.go @@ -450,7 +450,8 @@ func (a *servicePrincipalSecretsImpl) Create(ctx context.Context, request Create queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &createServicePrincipalSecretResponse) + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createServicePrincipalSecretResponse) return &createServicePrincipalSecretResponse, err } diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 9b6eb340c..38974449d 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -128,15 +128,30 @@ func (s 
CreateServicePrincipalFederationPolicyRequest) MarshalJSON() ([]byte, er return marshal.Marshal(s) } -// Create service principal secret type CreateServicePrincipalSecretRequest struct { + // The lifetime of the secret in seconds. If this parameter is not provided, + // the secret will have a default lifetime of 730 days (63072000s). + Lifetime string `json:"lifetime,omitempty"` // The service principal ID. ServicePrincipalId int64 `json:"-" url:"-"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CreateServicePrincipalSecretRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateServicePrincipalSecretRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type CreateServicePrincipalSecretResponse struct { // UTC time when the secret was created CreateTime string `json:"create_time,omitempty"` + // UTC time when the secret will expire. If the field is not present, the + // secret does not expire. + ExpireTime string `json:"expire_time,omitempty"` // ID of the secret Id string `json:"id,omitempty"` // Secret Value @@ -591,6 +606,9 @@ func (s PublishedAppOutput) MarshalJSON() ([]byte, error) { type SecretInfo struct { // UTC time when the secret was created CreateTime string `json:"create_time,omitempty"` + // UTC time when the secret will expire. If the field is not present, the + // secret does not expire. 
+ ExpireTime string `json:"expire_time,omitempty"` // ID of the secret Id string `json:"id,omitempty"` // Secret Hash diff --git a/service/serving/api.go b/service/serving/api.go index 26b3e65b6..3d5a3a133 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -9,6 +9,7 @@ import ( "time" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/dataplane" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/retries" "github.com/databricks/databricks-sdk-go/useragent" @@ -439,9 +440,12 @@ func NewServingEndpointsDataPlane(client *client.DatabricksClient, ) *ServingEndpointsDataPlaneAPI { return &ServingEndpointsDataPlaneAPI{ servingEndpointsDataPlaneImpl: servingEndpointsDataPlaneImpl{ - client: client, - dataPlaneService: NewDataPlaneService(), - controlPlane: controlPlane, + client: client, + controlPlane: controlPlane, + dpts: dataplane.NewEndpointTokenSource( + client, + client.Config.GetTokenSource(), + ), }, } } diff --git a/service/serving/impl.go b/service/serving/impl.go index 389c796b2..b8498ef53 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -4,15 +4,16 @@ package serving import ( "context" - "errors" "fmt" "net/http" + "strings" + "sync" "github.com/databricks/databricks-sdk-go/client" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/dataplane" "github.com/databricks/databricks-sdk-go/httpclient" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/useragent" - goauth "golang.org/x/oauth2" ) // unexported type that holds implementations of just ServingEndpoints API methods @@ -237,39 +238,41 @@ func (a *servingEndpointsImpl) UpdatePermissions(ctx context.Context, request Se // unexported type that holds implementations of just ServingEndpointsDataPlane API methods type servingEndpointsDataPlaneImpl struct { - dataPlaneService DataPlaneService - controlPlane 
*ServingEndpointsAPI - client *client.DatabricksClient + client *client.DatabricksClient + controlPlane *ServingEndpointsAPI + dpts dataplane.EndpointTokenSource + infos sync.Map } -func (a *servingEndpointsDataPlaneImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { - getRequest := GetServingEndpointRequest{ - Name: request.Name, - } - token, err := a.client.Config.GetToken() - if err != nil { - return nil, err - } - infoGetter := func() (*DataPlaneInfo, error) { - response, err := a.controlPlane.Get(ctx, getRequest) +func (a *servingEndpointsDataPlaneImpl) dataPlaneInfoQuery(ctx context.Context, request QueryEndpointInput) (*DataPlaneInfo, error) { + key := "Query/" + strings.Join([]string{ + fmt.Sprintf("%v", request.Name), + }, "/") + + info, ok := a.infos.Load(key) + if !ok { + response, err := a.controlPlane.Get(ctx, GetServingEndpointRequest{ + Name: request.Name, + }) if err != nil { return nil, err } - if response.DataPlaneInfo == nil { - return nil, errors.New("resource does not support direct Data Plane access") - } - return response.DataPlaneInfo.QueryInfo, nil + info = response.DataPlaneInfo.QueryInfo + a.infos.Store(key, info) } - refresh := func(info *DataPlaneInfo) (*goauth.Token, error) { - return a.client.GetOAuthToken(ctx, info.AuthorizationDetails, token) - } - getParams := []string{ - request.Name, + return info.(*DataPlaneInfo), nil +} + +func (a *servingEndpointsDataPlaneImpl) Query(ctx context.Context, request QueryEndpointInput) (*QueryEndpointResponse, error) { + dpi, err := a.dataPlaneInfoQuery(ctx, request) + if err != nil { + return nil, err } - endpointUrl, dataPlaneToken, err := a.dataPlaneService.GetDataPlaneDetails("Query", getParams, refresh, infoGetter) + dpt, err := a.dpts.Token(ctx, dpi.EndpointUrl, dpi.AuthorizationDetails) if err != nil { return nil, err } + headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" @@ -281,7 
+284,7 @@ func (a *servingEndpointsDataPlaneImpl) Query(ctx context.Context, request Query var queryEndpointResponse QueryEndpointResponse opts = append(opts, httpclient.WithRequestData(request)) opts = append(opts, httpclient.WithResponseUnmarshal(&queryEndpointResponse)) - opts = append(opts, httpclient.WithToken(dataPlaneToken)) - err = a.client.ApiClient().Do(ctx, http.MethodPost, endpointUrl, opts...) + opts = append(opts, httpclient.WithToken(dpt)) + err = a.client.ApiClient().Do(ctx, http.MethodPost, dpi.EndpointUrl, opts...) return &queryEndpointResponse, err } diff --git a/service/serving/model.go b/service/serving/model.go index 3087942d7..424e151f7 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -247,6 +247,12 @@ type AmazonBedrockConfig struct { // The underlying provider in Amazon Bedrock. Supported values (case // insensitive) include: Anthropic, Cohere, AI21Labs, Amazon. BedrockProvider AmazonBedrockConfigBedrockProvider `json:"bedrock_provider"` + // ARN of the instance profile that the external model will use to access + // AWS resources. You must authenticate using an instance profile or access + // keys. If you prefer to authenticate using access keys, see + // `aws_access_key_id`, `aws_access_key_id_plaintext`, + // `aws_secret_access_key` and `aws_secret_access_key_plaintext`. + InstanceProfileArn string `json:"instance_profile_arn,omitempty"` ForceSendFields []string `json:"-" url:"-"` } diff --git a/service/sharing/api.go b/service/sharing/api.go index 5aa866541..f56e53d7d 100755 --- a/service/sharing/api.go +++ b/service/sharing/api.go @@ -9,7 +9,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" - "github.com/databricks/databricks-sdk-go/service/catalog" "github.com/databricks/databricks-sdk-go/useragent" ) @@ -76,6 +75,18 @@ type ProvidersInterface interface { // This method is generated by Databricks SDK Code Generator. 
ProviderInfoNameToMetastoreIdMap(ctx context.Context, request ListProvidersRequest) (map[string]string, error) + // List assets by provider share. + // + // Get arrays of assets associated with a specified provider's share. The caller + // is the recipient of the share. + ListProviderShareAssets(ctx context.Context, request ListProviderShareAssetsRequest) (*ListProviderShareAssetsResponse, error) + + // List assets by provider share. + // + // Get arrays of assets associated with a specified provider's share. The caller + // is the recipient of the share. + ListProviderShareAssetsByProviderNameAndShareName(ctx context.Context, providerName string, shareName string) (*ListProviderShareAssetsResponse, error) + // List shares by Provider. // // Gets an array of a specified provider's shares within the metastore where: @@ -171,6 +182,17 @@ func (a *ProvidersAPI) ProviderInfoNameToMetastoreIdMap(ctx context.Context, req return mapping, nil } +// List assets by provider share. +// +// Get arrays of assets associated with a specified provider's share. The caller +// is the recipient of the share. +func (a *ProvidersAPI) ListProviderShareAssetsByProviderNameAndShareName(ctx context.Context, providerName string, shareName string) (*ListProviderShareAssetsResponse, error) { + return a.providersImpl.ListProviderShareAssets(ctx, ListProviderShareAssetsRequest{ + ProviderName: providerName, + ShareName: shareName, + }) +} + // List shares by Provider. // // Gets an array of a specified provider's shares within the metastore where: @@ -445,13 +467,13 @@ type SharesInterface interface { // // Gets the permissions for a data share from the metastore. The caller must be // a metastore admin or the owner of the share. - SharePermissions(ctx context.Context, request SharePermissionsRequest) (*catalog.PermissionsList, error) + SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetSharePermissionsResponse, error) // Get permissions. 
// // Gets the permissions for a data share from the metastore. The caller must be // a metastore admin or the owner of the share. - SharePermissionsByName(ctx context.Context, name string) (*catalog.PermissionsList, error) + SharePermissionsByName(ctx context.Context, name string) (*GetSharePermissionsResponse, error) // Update a share. // @@ -480,9 +502,9 @@ type SharesInterface interface { // Updates the permissions for a data share in the metastore. The caller must be // a metastore admin or an owner of the share. // - // For new recipient grants, the user must also be the owner of the recipients. - // recipient revocations do not require additional privileges. - UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error + // For new recipient grants, the user must also be the recipient owner or + // metastore admin. recipient revocations do not require additional privileges. + UpdatePermissions(ctx context.Context, request UpdateSharePermissions) (*UpdateSharePermissionsResponse, error) } func NewShares(client *client.DatabricksClient) *SharesAPI { @@ -526,7 +548,7 @@ func (a *SharesAPI) GetByName(ctx context.Context, name string) (*ShareInfo, err // // Gets the permissions for a data share from the metastore. The caller must be // a metastore admin or the owner of the share. 
-func (a *SharesAPI) SharePermissionsByName(ctx context.Context, name string) (*catalog.PermissionsList, error) { +func (a *SharesAPI) SharePermissionsByName(ctx context.Context, name string) (*GetSharePermissionsResponse, error) { return a.sharesImpl.SharePermissions(ctx, SharePermissionsRequest{ Name: name, }) diff --git a/service/sharing/impl.go b/service/sharing/impl.go index a3d8230ab..c2aa1e251 100755 --- a/service/sharing/impl.go +++ b/service/sharing/impl.go @@ -10,9 +10,6 @@ import ( "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" "github.com/databricks/databricks-sdk-go/useragent" - "golang.org/x/exp/slices" - - "github.com/databricks/databricks-sdk-go/service/catalog" ) // unexported type that holds implementations of just Providers API methods @@ -104,6 +101,16 @@ func (a *providersImpl) internalList(ctx context.Context, request ListProvidersR return &listProvidersResponse, err } +func (a *providersImpl) ListProviderShareAssets(ctx context.Context, request ListProviderShareAssetsRequest) (*ListProviderShareAssetsResponse, error) { + var listProviderShareAssetsResponse ListProviderShareAssetsResponse + path := fmt.Sprintf("/api/2.1/data-sharing/providers/%v/shares/%v", request.ProviderName, request.ShareName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listProviderShareAssetsResponse) + return &listProviderShareAssetsResponse, err +} + // List shares by Provider. 
// // Gets an array of a specified provider's shares within the metastore where: @@ -399,14 +406,14 @@ func (a *sharesImpl) internalList(ctx context.Context, request ListSharesRequest return &listSharesResponse, err } -func (a *sharesImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*catalog.PermissionsList, error) { - var permissionsList catalog.PermissionsList +func (a *sharesImpl) SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetSharePermissionsResponse, error) { + var getSharePermissionsResponse GetSharePermissionsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v/permissions", request.Name) queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &permissionsList) - return &permissionsList, err + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getSharePermissionsResponse) + return &getSharePermissionsResponse, err } func (a *sharesImpl) Update(ctx context.Context, request UpdateShare) (*ShareInfo, error) { @@ -420,19 +427,13 @@ func (a *sharesImpl) Update(ctx context.Context, request UpdateShare) (*ShareInf return &shareInfo, err } -func (a *sharesImpl) UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error { - var updatePermissionsResponse UpdatePermissionsResponse +func (a *sharesImpl) UpdatePermissions(ctx context.Context, request UpdateSharePermissions) (*UpdateSharePermissionsResponse, error) { + var updateSharePermissionsResponse UpdateSharePermissionsResponse path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v/permissions", request.Name) queryParams := make(map[string]any) - if request.MaxResults != 0 || slices.Contains(request.ForceSendFields, "MaxResults") { - queryParams["max_results"] = request.MaxResults - } - if request.PageToken != "" || slices.Contains(request.ForceSendFields, "PageToken") { - 
queryParams["page_token"] = request.PageToken - } headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updatePermissionsResponse) - return err + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateSharePermissionsResponse) + return &updateSharePermissionsResponse, err } diff --git a/service/sharing/interface.go b/service/sharing/interface.go index 3e74d4694..312f0c368 100755 --- a/service/sharing/interface.go +++ b/service/sharing/interface.go @@ -4,8 +4,6 @@ package sharing import ( "context" - - "github.com/databricks/databricks-sdk-go/service/catalog" ) // A data provider is an object representing the organization in the real world @@ -42,6 +40,12 @@ type ProvidersService interface { // Use ListAll() to get all ProviderInfo instances, which will iterate over every result page. List(ctx context.Context, request ListProvidersRequest) (*ListProvidersResponse, error) + // List assets by provider share. + // + // Get arrays of assets associated with a specified provider's share. The + // caller is the recipient of the share. + ListProviderShareAssets(ctx context.Context, request ListProviderShareAssetsRequest) (*ListProviderShareAssetsResponse, error) + // List shares by Provider. // // Gets an array of a specified provider's shares within the metastore @@ -196,7 +200,7 @@ type SharesService interface { // // Gets the permissions for a data share from the metastore. The caller must // be a metastore admin or the owner of the share. - SharePermissions(ctx context.Context, request SharePermissionsRequest) (*catalog.PermissionsList, error) + SharePermissions(ctx context.Context, request SharePermissionsRequest) (*GetSharePermissionsResponse, error) // Update a share. // @@ -225,7 +229,8 @@ type SharesService interface { // Updates the permissions for a data share in the metastore. 
The caller // must be a metastore admin or an owner of the share. // - // For new recipient grants, the user must also be the owner of the - // recipients. recipient revocations do not require additional privileges. - UpdatePermissions(ctx context.Context, request UpdateSharePermissions) error + // For new recipient grants, the user must also be the recipient owner or + // metastore admin. recipient revocations do not require additional + // privileges. + UpdatePermissions(ctx context.Context, request UpdateSharePermissions) (*UpdateSharePermissionsResponse, error) } diff --git a/service/sharing/model.go b/service/sharing/model.go index e893acc72..0784530a4 100755 --- a/service/sharing/model.go +++ b/service/sharing/model.go @@ -14,6 +14,8 @@ type AuthenticationType string const AuthenticationTypeDatabricks AuthenticationType = `DATABRICKS` +const AuthenticationTypeOauthClientCredentials AuthenticationType = `OAUTH_CLIENT_CREDENTIALS` + const AuthenticationTypeToken AuthenticationType = `TOKEN` // String representation for [fmt.Print] @@ -24,11 +26,11 @@ func (f *AuthenticationType) String() string { // Set raw string value and validate it against allowed values func (f *AuthenticationType) Set(v string) error { switch v { - case `DATABRICKS`, `TOKEN`: + case `DATABRICKS`, `OAUTH_CLIENT_CREDENTIALS`, `TOKEN`: *f = AuthenticationType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "DATABRICKS", "TOKEN"`, v) + return fmt.Errorf(`value "%s" is not one of "DATABRICKS", "OAUTH_CLIENT_CREDENTIALS", "TOKEN"`, v) } } @@ -37,6 +39,75 @@ func (f *AuthenticationType) Type() string { return "AuthenticationType" } +// UC supported column types Copied from +// https://src.dev.databricks.com/databricks/universe@23a85902bb58695ab9293adc9f327b0714b55e72/-/blob/managed-catalog/api/messages/table.proto?L68 +type ColumnTypeName string + +const ColumnTypeNameArray ColumnTypeName = `ARRAY` + +const ColumnTypeNameBinary ColumnTypeName = `BINARY` + +const 
ColumnTypeNameBoolean ColumnTypeName = `BOOLEAN` + +const ColumnTypeNameByte ColumnTypeName = `BYTE` + +const ColumnTypeNameChar ColumnTypeName = `CHAR` + +const ColumnTypeNameDate ColumnTypeName = `DATE` + +const ColumnTypeNameDecimal ColumnTypeName = `DECIMAL` + +const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` + +const ColumnTypeNameFloat ColumnTypeName = `FLOAT` + +const ColumnTypeNameInt ColumnTypeName = `INT` + +const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` + +const ColumnTypeNameLong ColumnTypeName = `LONG` + +const ColumnTypeNameMap ColumnTypeName = `MAP` + +const ColumnTypeNameNull ColumnTypeName = `NULL` + +const ColumnTypeNameShort ColumnTypeName = `SHORT` + +const ColumnTypeNameString ColumnTypeName = `STRING` + +const ColumnTypeNameStruct ColumnTypeName = `STRUCT` + +const ColumnTypeNameTableType ColumnTypeName = `TABLE_TYPE` + +const ColumnTypeNameTimestamp ColumnTypeName = `TIMESTAMP` + +const ColumnTypeNameTimestampNtz ColumnTypeName = `TIMESTAMP_NTZ` + +const ColumnTypeNameUserDefinedType ColumnTypeName = `USER_DEFINED_TYPE` + +const ColumnTypeNameVariant ColumnTypeName = `VARIANT` + +// String representation for [fmt.Print] +func (f *ColumnTypeName) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ColumnTypeName) Set(v string) error { + switch v { + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`: + *f = ColumnTypeName(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE", "VARIANT"`, v) + } +} + +// Type always returns ColumnTypeName to satisfy 
[pflag.Value] interface +func (f *ColumnTypeName) Type() string { + return "ColumnTypeName" +} + type CreateProvider struct { // The delta sharing authentication type. AuthenticationType AuthenticationType `json:"authentication_type"` @@ -137,6 +208,199 @@ type DeleteShareRequest struct { Name string `json:"-" url:"-"` } +// Represents a UC dependency. +type DeltaSharingDependency struct { + // A Function in UC as a dependency. + Function *DeltaSharingFunctionDependency `json:"function,omitempty"` + // A Table in UC as a dependency. + Table *DeltaSharingTableDependency `json:"table,omitempty"` +} + +// Represents a list of dependencies. +type DeltaSharingDependencyList struct { + // An array of Dependency. + Dependencies []DeltaSharingDependency `json:"dependencies,omitempty"` +} + +// A Function in UC as a dependency. +type DeltaSharingFunctionDependency struct { + FunctionName string `json:"function_name,omitempty"` + + SchemaName string `json:"schema_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DeltaSharingFunctionDependency) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeltaSharingFunctionDependency) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A Table in UC as a dependency. +type DeltaSharingTableDependency struct { + SchemaName string `json:"schema_name,omitempty"` + + TableName string `json:"table_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DeltaSharingTableDependency) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeltaSharingTableDependency) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Function struct { + // The aliass of registered model. + Aliases []RegisteredModelAlias `json:"aliases,omitempty"` + // The comment of the function. + Comment string `json:"comment,omitempty"` + // The data type of the function. 
+ DataType ColumnTypeName `json:"data_type,omitempty"` + // The dependency list of the function. + DependencyList *DeltaSharingDependencyList `json:"dependency_list,omitempty"` + // The full data type of the function. + FullDataType string `json:"full_data_type,omitempty"` + // The id of the function. + Id string `json:"id,omitempty"` + // The function parameter information. + InputParams *FunctionParameterInfos `json:"input_params,omitempty"` + // The name of the function. + Name string `json:"name,omitempty"` + // The properties of the function. + Properties string `json:"properties,omitempty"` + // The routine definition of the function. + RoutineDefinition string `json:"routine_definition,omitempty"` + // The name of the schema that the function belongs to. + Schema string `json:"schema,omitempty"` + // The securable kind of the function. + SecurableKind SharedSecurableKind `json:"securable_kind,omitempty"` + // The name of the share that the function belongs to. + Share string `json:"share,omitempty"` + // The id of the share that the function belongs to. + ShareId string `json:"share_id,omitempty"` + // The storage location of the function. + StorageLocation string `json:"storage_location,omitempty"` + // The tags of the function. + Tags []catalog.TagKeyValue `json:"tags,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *Function) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Function) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Represents a parameter of a function. The same message is used for both input +// and output columns. +type FunctionParameterInfo struct { + // The comment of the parameter. + Comment string `json:"comment,omitempty"` + // The name of the parameter. + Name string `json:"name,omitempty"` + // The default value of the parameter. + ParameterDefault string `json:"parameter_default,omitempty"` + // The mode of the function parameter. 
+ ParameterMode FunctionParameterMode `json:"parameter_mode,omitempty"` + // The type of the function parameter. + ParameterType FunctionParameterType `json:"parameter_type,omitempty"` + // The position of the parameter. + Position int `json:"position,omitempty"` + // The interval type of the parameter type. + TypeIntervalType string `json:"type_interval_type,omitempty"` + // The type of the parameter in JSON format. + TypeJson string `json:"type_json,omitempty"` + // The type of the parameter in Enum format. + TypeName ColumnTypeName `json:"type_name,omitempty"` + // The precision of the parameter type. + TypePrecision int `json:"type_precision,omitempty"` + // The scale of the parameter type. + TypeScale int `json:"type_scale,omitempty"` + // The type of the parameter in text format. + TypeText string `json:"type_text,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *FunctionParameterInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s FunctionParameterInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type FunctionParameterInfos struct { + // The list of parameters of the function. 
+ Parameters []FunctionParameterInfo `json:"parameters,omitempty"` +} + +type FunctionParameterMode string + +const FunctionParameterModeIn FunctionParameterMode = `IN` + +const FunctionParameterModeInout FunctionParameterMode = `INOUT` + +const FunctionParameterModeOut FunctionParameterMode = `OUT` + +// String representation for [fmt.Print] +func (f *FunctionParameterMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionParameterMode) Set(v string) error { + switch v { + case `IN`, `INOUT`, `OUT`: + *f = FunctionParameterMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "IN", "INOUT", "OUT"`, v) + } +} + +// Type always returns FunctionParameterMode to satisfy [pflag.Value] interface +func (f *FunctionParameterMode) Type() string { + return "FunctionParameterMode" +} + +type FunctionParameterType string + +const FunctionParameterTypeColumn FunctionParameterType = `COLUMN` + +const FunctionParameterTypeParam FunctionParameterType = `PARAM` + +// String representation for [fmt.Print] +func (f *FunctionParameterType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *FunctionParameterType) Set(v string) error { + switch v { + case `COLUMN`, `PARAM`: + *f = FunctionParameterType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "COLUMN", "PARAM"`, v) + } +} + +// Type always returns FunctionParameterType to satisfy [pflag.Value] interface +func (f *FunctionParameterType) Type() string { + return "FunctionParameterType" +} + // Get a share activation URL type GetActivationUrlInfoRequest struct { // The one time activation url. It also accepts activation token. 
@@ -177,6 +441,25 @@ func (s GetRecipientSharePermissionsResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type GetSharePermissionsResponse struct { + // Opaque token to retrieve the next page of results. Absent if there are no + // more pages. __page_token__ should be set to this value for the next + // request (for the next page of results). + NextPageToken string `json:"next_page_token,omitempty"` + // The privileges assigned to each principal + PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GetSharePermissionsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetSharePermissionsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get a share type GetShareRequest struct { // Query for data to include in the share. @@ -200,6 +483,45 @@ type IpAccessList struct { AllowedIpAddresses []string `json:"allowed_ip_addresses,omitempty"` } +// List assets by provider share +type ListProviderShareAssetsRequest struct { + // Maximum number of functions to return. + FunctionMaxResults int `json:"-" url:"function_max_results,omitempty"` + // Maximum number of notebooks to return. + NotebookMaxResults int `json:"-" url:"notebook_max_results,omitempty"` + // The name of the provider who owns the share. + ProviderName string `json:"-" url:"-"` + // The name of the share. + ShareName string `json:"-" url:"-"` + // Maximum number of tables to return. + TableMaxResults int `json:"-" url:"table_max_results,omitempty"` + // Maximum number of volumes to return. 
+ VolumeMaxResults int `json:"-" url:"volume_max_results,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ListProviderShareAssetsRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListProviderShareAssetsRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Response to ListProviderShareAssets, which contains the list of assets of a +// share. +type ListProviderShareAssetsResponse struct { + // The list of functions in the share. + Functions []Function `json:"functions,omitempty"` + // The list of notebooks in the share. + Notebooks []NotebookFile `json:"notebooks,omitempty"` + // The list of tables in the share. + Tables []Table `json:"tables,omitempty"` + // The list of volumes in the share. + Volumes []Volume `json:"volumes,omitempty"` +} + type ListProviderSharesResponse struct { // Opaque token to retrieve the next page of results. Absent if there are no // more pages. __page_token__ should be set to this value for the next @@ -362,12 +684,32 @@ func (s ListSharesResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type Partition struct { - // An array of partition values. - Values []PartitionValue `json:"values,omitempty"` +type NotebookFile struct { + // The comment of the notebook file. + Comment string `json:"comment,omitempty"` + // The id of the notebook file. + Id string `json:"id,omitempty"` + // Name of the notebook file. + Name string `json:"name,omitempty"` + // The name of the share that the notebook file belongs to. + Share string `json:"share,omitempty"` + // The id of the share that the notebook file belongs to. + ShareId string `json:"share_id,omitempty"` + // The tags of the notebook file. 
+ Tags []catalog.TagKeyValue `json:"tags,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` } -type PartitionSpecificationPartition struct { +func (s *NotebookFile) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s NotebookFile) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type Partition struct { // An array of partition values. Values []PartitionValue `json:"values,omitempty"` } @@ -424,6 +766,25 @@ func (f *PartitionValueOp) Type() string { return "PartitionValueOp" } +type PermissionsChange struct { + // The set of privileges to add. + Add []string `json:"add,omitempty"` + // The principal whose privileges we are changing. + Principal string `json:"principal,omitempty"` + // The set of privileges to remove. + Remove []string `json:"remove,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PermissionsChange) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PermissionsChange) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type Privilege string const PrivilegeAccess Privilege = `ACCESS` @@ -727,6 +1088,23 @@ func (s RecipientTokenInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type RegisteredModelAlias struct { + // Name of the alias. + AliasName string `json:"alias_name,omitempty"` + // Numeric model version that alias will reference. + VersionNum int64 `json:"version_num,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *RegisteredModelAlias) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s RegisteredModelAlias) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get an access token type RetrieveTokenRequest struct { // The one time activation url. It also accepts activation token. @@ -858,7 +1236,6 @@ type SharedDataObject struct { // Whether to enable cdf or indicate if cdf is enabled on the shared object. 
CdfEnabled bool `json:"cdf_enabled,omitempty"` // A user-provided comment when adding the data object to the share. - // [Update:OPT] Comment string `json:"comment,omitempty"` // The content of the notebook file when the data object type is // NOTEBOOK_FILE. This should be base64 encoded. Required for adding a @@ -869,10 +1246,9 @@ type SharedDataObject struct { // Whether to enable or disable sharing of data history. If not specified, // the default is **DISABLED**. HistoryDataSharingStatus SharedDataObjectHistoryDataSharingStatus `json:"history_data_sharing_status,omitempty"` - // A fully qualified name that uniquely identifies a data object. - // - // For example, a table's fully qualified name is in the format of - // `..`. + // A fully qualified name that uniquely identifies a data object. For + // example, a table's fully qualified name is in the format of + // `..
`, Name string `json:"name"` // Array of partitions for the shared data. Partitions []Partition `json:"partitions,omitempty"` @@ -891,11 +1267,11 @@ type SharedDataObject struct { StartVersion int64 `json:"start_version,omitempty"` // One of: **ACTIVE**, **PERMISSION_DENIED**. Status SharedDataObjectStatus `json:"status,omitempty"` - // A user-provided new name for the data object within the share. If this - // new name is not provided, the object's original name will be used as the - // `string_shared_as` name. The `string_shared_as` name must be unique - // within a share. For notebooks, the new name should be the new notebook - // file name. + // A user-provided new name for the shared object within the share. If this + // new name is not not provided, the object's original name will be used as + // the `string_shared_as` name. The `string_shared_as` name must be unique + // for objects of the same type within a Share. For notebooks, the new name + // should be the new notebook file name. StringSharedAs string `json:"string_shared_as,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -909,7 +1285,6 @@ func (s SharedDataObject) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The type of the data object. type SharedDataObjectDataObjectType string const SharedDataObjectDataObjectTypeFeatureSpec SharedDataObjectDataObjectType = `FEATURE_SPEC` @@ -951,8 +1326,6 @@ func (f *SharedDataObjectDataObjectType) Type() string { return "SharedDataObjectDataObjectType" } -// Whether to enable or disable sharing of data history. If not specified, the -// default is **DISABLED**. type SharedDataObjectHistoryDataSharingStatus string const SharedDataObjectHistoryDataSharingStatusDisabled SharedDataObjectHistoryDataSharingStatus = `DISABLED` @@ -980,7 +1353,6 @@ func (f *SharedDataObjectHistoryDataSharingStatus) Type() string { return "SharedDataObjectHistoryDataSharingStatus" } -// One of: **ACTIVE**, **PERMISSION_DENIED**. 
type SharedDataObjectStatus string const SharedDataObjectStatusActive SharedDataObjectStatus = `ACTIVE` @@ -1015,7 +1387,6 @@ type SharedDataObjectUpdate struct { DataObject *SharedDataObject `json:"data_object,omitempty"` } -// One of: **ADD**, **REMOVE**, **UPDATE**. type SharedDataObjectUpdateAction string const SharedDataObjectUpdateActionAdd SharedDataObjectUpdateAction = `ADD` @@ -1045,7 +1416,132 @@ func (f *SharedDataObjectUpdateAction) Type() string { return "SharedDataObjectUpdateAction" } -type UpdatePermissionsResponse struct { +// The SecurableKind of a delta-shared object. +type SharedSecurableKind string + +const SharedSecurableKindFunctionFeatureSpec SharedSecurableKind = `FUNCTION_FEATURE_SPEC` + +const SharedSecurableKindFunctionRegisteredModel SharedSecurableKind = `FUNCTION_REGISTERED_MODEL` + +const SharedSecurableKindFunctionStandard SharedSecurableKind = `FUNCTION_STANDARD` + +// String representation for [fmt.Print] +func (f *SharedSecurableKind) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SharedSecurableKind) Set(v string) error { + switch v { + case `FUNCTION_FEATURE_SPEC`, `FUNCTION_REGISTERED_MODEL`, `FUNCTION_STANDARD`: + *f = SharedSecurableKind(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "FUNCTION_FEATURE_SPEC", "FUNCTION_REGISTERED_MODEL", "FUNCTION_STANDARD"`, v) + } +} + +// Type always returns SharedSecurableKind to satisfy [pflag.Value] interface +func (f *SharedSecurableKind) Type() string { + return "SharedSecurableKind" +} + +type Table struct { + // The comment of the table. + Comment string `json:"comment,omitempty"` + // The id of the table. + Id string `json:"id,omitempty"` + // Internal information for D2D sharing that should not be disclosed to + // external users. + InternalAttributes *TableInternalAttributes `json:"internal_attributes,omitempty"` + // The name of a materialized table. 
+ MaterializedTableName string `json:"materialized_table_name,omitempty"` + // The name of the table. + Name string `json:"name,omitempty"` + // The name of the schema that the table belongs to. + Schema string `json:"schema,omitempty"` + // The name of the share that the table belongs to. + Share string `json:"share,omitempty"` + // The id of the share that the table belongs to. + ShareId string `json:"share_id,omitempty"` + // The Tags of the table. + Tags []catalog.TagKeyValue `json:"tags,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *Table) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Table) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Internal information for D2D sharing that should not be disclosed to external +// users. +type TableInternalAttributes struct { + // Will be populated in the reconciliation response for VIEW and + // FOREIGN_TABLE, with the value of the parent UC entity's storage_location, + // following the same logic as getManagedEntityPath in + // CreateStagingTableHandler, which is used to store the materialized table + // for a shared VIEW/FOREIGN_TABLE for D2O queries. The value will be used + // on the recipient side to be whitelisted when SEG is enabled on the + // workspace of the recipient, to allow the recipient users to query this + // shared VIEW/FOREIGN_TABLE. + ParentStorageLocation string `json:"parent_storage_location,omitempty"` + // The cloud storage location of a shard table with DIRECTORY_BASED_TABLE + // type. + StorageLocation string `json:"storage_location,omitempty"` + // The type of the shared table. + Type TableInternalAttributesSharedTableType `json:"type,omitempty"` + // The view definition of a shared view. DEPRECATED. 
+ ViewDefinition string `json:"view_definition,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *TableInternalAttributes) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s TableInternalAttributes) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type TableInternalAttributesSharedTableType string + +const TableInternalAttributesSharedTableTypeDirectoryBasedTable TableInternalAttributesSharedTableType = `DIRECTORY_BASED_TABLE` + +const TableInternalAttributesSharedTableTypeFileBasedTable TableInternalAttributesSharedTableType = `FILE_BASED_TABLE` + +const TableInternalAttributesSharedTableTypeForeignTable TableInternalAttributesSharedTableType = `FOREIGN_TABLE` + +const TableInternalAttributesSharedTableTypeMaterializedView TableInternalAttributesSharedTableType = `MATERIALIZED_VIEW` + +const TableInternalAttributesSharedTableTypeStreamingTable TableInternalAttributesSharedTableType = `STREAMING_TABLE` + +const TableInternalAttributesSharedTableTypeView TableInternalAttributesSharedTableType = `VIEW` + +// String representation for [fmt.Print] +func (f *TableInternalAttributesSharedTableType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *TableInternalAttributesSharedTableType) Set(v string) error { + switch v { + case `DIRECTORY_BASED_TABLE`, `FILE_BASED_TABLE`, `FOREIGN_TABLE`, `MATERIALIZED_VIEW`, `STREAMING_TABLE`, `VIEW`: + *f = TableInternalAttributesSharedTableType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DIRECTORY_BASED_TABLE", "FILE_BASED_TABLE", "FOREIGN_TABLE", "MATERIALIZED_VIEW", "STREAMING_TABLE", "VIEW"`, v) + } +} + +// Type always returns TableInternalAttributesSharedTableType to satisfy [pflag.Value] interface +func (f *TableInternalAttributesSharedTableType) Type() string { + return "TableInternalAttributesSharedTableType" } type UpdateProvider struct { @@ -1129,29 +1625,63 @@ 
func (s UpdateShare) MarshalJSON() ([]byte, error) { type UpdateSharePermissions struct { // Array of permission changes. - Changes []catalog.PermissionsChange `json:"changes,omitempty"` - // Maximum number of permissions to return. - when set to 0, the page length - // is set to a server configured value (recommended); - when set to a value - // greater than 0, the page length is the minimum of this value and a server - // configured value; - when set to a value less than 0, an invalid parameter - // error is returned; - If not set, all valid permissions are returned (not - // recommended). - Note: The number of returned permissions might be less - // than the specified max_results size, even zero. The only definitive - // indication that no further permissions can be fetched is when the - // next_page_token is unset from the response. - MaxResults int `json:"-" url:"max_results,omitempty"` + Changes []PermissionsChange `json:"changes,omitempty"` // The name of the share. Name string `json:"-" url:"-"` - // Opaque pagination token to go to next page based on previous query. - PageToken string `json:"-" url:"page_token,omitempty"` +} + +type UpdateSharePermissionsResponse struct { + // The privileges assigned to each principal + PrivilegeAssignments []PrivilegeAssignment `json:"privilege_assignments,omitempty"` +} + +type Volume struct { + // The comment of the volume. + Comment string `json:"comment,omitempty"` + // This id maps to the shared_volume_id in database Recipient needs + // shared_volume_id for recon to check if this volume is already in + // recipient's DB or not. + Id string `json:"id,omitempty"` + // Internal attributes for D2D sharing that should not be disclosed to + // external users. + InternalAttributes *VolumeInternalAttributes `json:"internal_attributes,omitempty"` + // The name of the volume. + Name string `json:"name,omitempty"` + // The name of the schema that the volume belongs to. 
+ Schema string `json:"schema,omitempty"` + // The name of the share that the volume belongs to. + Share string `json:"share,omitempty"` + // / The id of the share that the volume belongs to. + ShareId string `json:"share_id,omitempty"` + // The tags of the volume. + Tags []catalog.TagKeyValue `json:"tags,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *Volume) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Volume) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Internal information for D2D sharing that should not be disclosed to external +// users. +type VolumeInternalAttributes struct { + // The cloud storage location of the volume + StorageLocation string `json:"storage_location,omitempty"` + // The type of the shared volume. + Type string `json:"type,omitempty"` ForceSendFields []string `json:"-" url:"-"` } -func (s *UpdateSharePermissions) UnmarshalJSON(b []byte) error { +func (s *VolumeInternalAttributes) UnmarshalJSON(b []byte) error { return marshal.Unmarshal(b, s) } -func (s UpdateSharePermissions) MarshalJSON() ([]byte, error) { +func (s VolumeInternalAttributes) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index a2aa845a0..4fdb86ed0 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -572,6 +572,8 @@ func (s QueryVectorIndexNextPageRequest) MarshalJSON() ([]byte, error) { type QueryVectorIndexRequest struct { // List of column names to include in the response. Columns []string `json:"columns"` + // Column names used to retrieve data to send to the reranker. + ColumnsToRerank []string `json:"columns_to_rerank,omitempty"` // JSON string representing query filters. // // Example filters: - `{"id <": 5}`: Filter for id less than 5. 
- `{"id >": diff --git a/service/workspace/model.go b/service/workspace/model.go index 03dbb05b7..82e7bad67 100755 --- a/service/workspace/model.go +++ b/service/workspace/model.go @@ -274,6 +274,7 @@ type DeleteSecret struct { type DeleteSecretResponse struct { } +// The format for workspace import and export. type ExportFormat string const ExportFormatAuto ExportFormat = `AUTO` @@ -284,6 +285,8 @@ const ExportFormatHtml ExportFormat = `HTML` const ExportFormatJupyter ExportFormat = `JUPYTER` +const ExportFormatRaw ExportFormat = `RAW` + const ExportFormatRMarkdown ExportFormat = `R_MARKDOWN` const ExportFormatSource ExportFormat = `SOURCE` @@ -296,11 +299,11 @@ func (f *ExportFormat) String() string { // Set raw string value and validate it against allowed values func (f *ExportFormat) Set(v string) error { switch v { - case `AUTO`, `DBC`, `HTML`, `JUPYTER`, `R_MARKDOWN`, `SOURCE`: + case `AUTO`, `DBC`, `HTML`, `JUPYTER`, `RAW`, `R_MARKDOWN`, `SOURCE`: *f = ExportFormat(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "AUTO", "DBC", "HTML", "JUPYTER", "R_MARKDOWN", "SOURCE"`, v) + return fmt.Errorf(`value "%s" is not one of "AUTO", "DBC", "HTML", "JUPYTER", "RAW", "R_MARKDOWN", "SOURCE"`, v) } } @@ -331,6 +334,8 @@ type ExportRequest struct { Path string `json:"-" url:"path"` } +// The request field `direct_download` determines whether a JSON response or +// binary contents are returned by this endpoint. type ExportResponse struct { // The base64-encoded content. If the limit (10MB) is exceeded, exception // with error code **MAX_NOTEBOOK_SIZE_EXCEEDED** is thrown. @@ -528,39 +533,21 @@ func (s Import) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// This specifies the format of the file to be imported. -// -// The value is case sensitive. -// -// - `AUTO`: The item is imported depending on an analysis of the item's -// extension and the header content provided in the request. 
If the item is -// imported as a notebook, then the item's extension is automatically removed. - -// `SOURCE`: The notebook or directory is imported as source code. - `HTML`: The -// notebook is imported as an HTML file. - `JUPYTER`: The notebook is imported -// as a Jupyter/IPython Notebook file. - `DBC`: The notebook is imported in -// Databricks archive format. Required for directories. - `R_MARKDOWN`: The -// notebook is imported from R Markdown format. +// The format for workspace import and export. type ImportFormat string -// The item is imported depending on an analysis of the item's extension and const ImportFormatAuto ImportFormat = `AUTO` -// The notebook is imported in archive format. Required for -// directories. const ImportFormatDbc ImportFormat = `DBC` -// The notebook is imported as an HTML file. const ImportFormatHtml ImportFormat = `HTML` -// The notebook is imported as a Jupyter/IPython Notebook file. const ImportFormatJupyter ImportFormat = `JUPYTER` const ImportFormatRaw ImportFormat = `RAW` -// The notebook is imported from R Markdown format. const ImportFormatRMarkdown ImportFormat = `R_MARKDOWN` -// The notebook or directory is imported as source code. const ImportFormatSource ImportFormat = `SOURCE` // String representation for [fmt.Print] @@ -587,8 +574,7 @@ func (f *ImportFormat) Type() string { type ImportResponse struct { } -// The language of the object. This value is set only if the object type is -// `NOTEBOOK`. +// The language of notebook. type Language string const LanguagePython Language = `PYTHON` @@ -700,7 +686,7 @@ type ListSecretsResponse struct { // List contents type ListWorkspaceRequest struct { // UTC timestamp in milliseconds - NotebooksModifiedAfter int `json:"-" url:"notebooks_modified_after,omitempty"` + NotebooksModifiedAfter int64 `json:"-" url:"notebooks_modified_after,omitempty"` // The absolute path of the notebook or directory. 
Path string `json:"-" url:"path"` @@ -725,11 +711,13 @@ type Mkdirs struct { type MkdirsResponse struct { } +// The information of the object in workspace. It will be returned by “list“ +// and “get-status“. type ObjectInfo struct { // Only applicable to files. The creation UTC timestamp. CreatedAt int64 `json:"created_at,omitempty"` // The language of the object. This value is set only if the object type is - // `NOTEBOOK`. + // ``NOTEBOOK``. Language Language `json:"language,omitempty"` // Only applicable to files, the last modified UTC timestamp. ModifiedAt int64 `json:"modified_at,omitempty"` @@ -761,28 +749,18 @@ func (s ObjectInfo) MarshalJSON() ([]byte, error) { } // The type of the object in workspace. -// -// - `NOTEBOOK`: document that contains runnable code, visualizations, and -// explanatory text. - `DIRECTORY`: directory - `LIBRARY`: library - `FILE`: -// file - `REPO`: repository - `DASHBOARD`: Lakeview dashboard type ObjectType string -// Lakeview dashboard const ObjectTypeDashboard ObjectType = `DASHBOARD` -// directory const ObjectTypeDirectory ObjectType = `DIRECTORY` -// file const ObjectTypeFile ObjectType = `FILE` -// library const ObjectTypeLibrary ObjectType = `LIBRARY` -// document that contains runnable code, visualizations, and explanatory text. const ObjectTypeNotebook ObjectType = `NOTEBOOK` -// repository const ObjectTypeRepo ObjectType = `REPO` // String representation for [fmt.Print] From 728c6e75211b66dc8768f2adaaf01b55c24bfda8 Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Mon, 3 Mar 2025 15:48:19 +0000 Subject: [PATCH 24/54] [Release] Release v0.59.0 ## Release v0.59.0 ### Bug Fixes * Fix unlikely issue due to conflicting error details in `APIError`. 
### Internal Changes * Update Jobs ListRuns API to support paginated responses ([#1151](https://github.com/databricks/databricks-sdk-go/pull/1151)) * Add `poll.SimpleError` to mock waiter objects returning errors ([#1155](https://github.com/databricks/databricks-sdk-go/pull/1155)) * Refactor `APIError` to expose different types of error details ([#1153](https://github.com/databricks/databricks-sdk-go/pull/1153)). * Update Jobs ListJobs API to support paginated responses ([#1150](https://github.com/databricks/databricks-sdk-go/pull/1150)) * Introduce automated tagging ([#1148](https://github.com/databricks/databricks-sdk-go/pull/1148)). * Update Jobs GetJob API to support paginated responses ([#1133](https://github.com/databricks/databricks-sdk-go/pull/1133)). * Update Jobs GetRun API to support paginated responses ([#1132](https://github.com/databricks/databricks-sdk-go/pull/1132)). ### API Changes * Added `GetSpace` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. * Added `ListProviderShareAssets` method for [w.Providers](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ProvidersAPI) workspace-level service. * Added `BudgetPolicyId` and `EffectiveBudgetPolicyId` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). * Added `Policy` field for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). * Added `DatabricksGcpServiceAccount` field for [catalog.ValidateCredentialRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ValidateCredentialRequest). * Added `AttachmentId` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment). 
* Added `ConversationId` field for [dashboards.GenieConversation](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieConversation). * Added `MessageId` field for [dashboards.GenieMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieMessage). * Added `Description`, `Id`, `LastUpdatedTimestamp`, `Query`, `QueryResultMetadata` and `Title` fields for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). * Added `GenAiComputeTask` field for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). * Added `GenAiComputeTask` field for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). * Added `GenAiComputeTask` field for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). * Added `RunName` field for [ml.CreateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateRun). * Added `RunName` field for [ml.RunInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInfo). * Added `RunName` field for [ml.UpdateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#UpdateRun). * Added `Lifetime` field for [oauth2.CreateServicePrincipalSecretRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretRequest). * Added `ExpireTime` field for [oauth2.CreateServicePrincipalSecretResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretResponse). * Added `ExpireTime` field for [oauth2.SecretInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#SecretInfo). 
* Added `InstanceProfileArn` field for [serving.AmazonBedrockConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AmazonBedrockConfig). * Added `Add`, `Principal` and `Remove` fields for [sharing.PermissionsChange](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#PermissionsChange). * Added `ColumnsToRerank` field for [vectorsearch.QueryVectorIndexRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#QueryVectorIndexRequest). * Added `Oracle` and `Teradata` enum values for [catalog.ConnectionType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ConnectionType). * Added `FunctionArgumentsInvalidTypeException` and `MessageCancelledWhileExecutingException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). * Added `Waiting` enum value for [jobs.RunLifecycleStateV2State](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunLifecycleStateV2State). * Added `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). * Added `OauthClientCredentials` enum value for [sharing.AuthenticationType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#AuthenticationType). * Added `Raw` enum value for [workspace.ExportFormat](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/workspace#ExportFormat). * [Breaking] Changed `GetByName` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service to return [ml.GetExperimentByNameResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#GetExperimentByNameResponse). 
* [Breaking] Changed `LogInputs` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service with new required argument order. * [Breaking] Changed `SharePermissions` and `UpdatePermissions` methods for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service return type to become non-empty. * [Breaking] Changed `SharePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.GetSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#GetSharePermissionsResponse). * [Breaking] Changed `UpdatePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.UpdateSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissionsResponse). * Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. * [Breaking] Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. * [Breaking] Changed `Partitions` field for [cleanrooms.CleanRoomAssetTableLocalDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetTableLocalDetails) to type [cleanrooms.PartitionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#PartitionList). 
* [Breaking] Changed `Query` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment) to type [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). * Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. * [Breaking] Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. * [Breaking] Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. * Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. * Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. * [Breaking] Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. * [Breaking] Changed `ViewType` field for [ml.ListExperimentsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ListExperimentsRequest) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). * [Breaking] Changed `RunId` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs) to be required. * [Breaking] Changed `ViewType` field for [ml.SearchExperiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperiments) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). 
* [Breaking] Changed `RunViewType` field for [ml.SearchRuns](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRuns) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). * [Breaking] Removed `CustomTags` and `PolicyName` fields for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). * [Breaking] Removed `CachedQuerySchema`, `Description`, `Id`, `InstructionId`, `InstructionTitle`, `LastUpdatedTimestamp`, `Query`, `StatementId` and `Title` fields for [dashboards.QueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryAttachment). * [Breaking] Removed `MaxResults` and `PageToken` fields for [sharing.UpdateSharePermissions](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissions). * [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchExperimentsViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperimentsViewType). * [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchRunsRunViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRunsRunViewType). 
--- .release_metadata.json | 3 ++ CHANGELOG.md | 70 ++++++++++++++++++++++++++++++++++++++++++ NEXT_CHANGELOG.md | 63 +------------------------------------ 3 files changed, 74 insertions(+), 62 deletions(-) create mode 100644 .release_metadata.json diff --git a/.release_metadata.json b/.release_metadata.json new file mode 100644 index 000000000..22a4039ad --- /dev/null +++ b/.release_metadata.json @@ -0,0 +1,3 @@ +{ + "timestamp": "2025-03-03 15:48:14+0000" +} \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index dbad64813..c4b041750 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,75 @@ # Version changelog +## Release v0.59.0 + +### Bug Fixes + +* Fix unlikely issue due to conflicting error details in `APIError`. + +### Internal Changes + +* Update Jobs ListRuns API to support paginated responses ([#1151](https://github.com/databricks/databricks-sdk-go/pull/1151)) +* Add `poll.SimpleError` to mock waiter objects returning errors ([#1155](https://github.com/databricks/databricks-sdk-go/pull/1155)) +* Refactor `APIError` to expose different types of error details ([#1153](https://github.com/databricks/databricks-sdk-go/pull/1153)). +* Update Jobs ListJobs API to support paginated responses ([#1150](https://github.com/databricks/databricks-sdk-go/pull/1150)) +* Introduce automated tagging ([#1148](https://github.com/databricks/databricks-sdk-go/pull/1148)). +* Update Jobs GetJob API to support paginated responses ([#1133](https://github.com/databricks/databricks-sdk-go/pull/1133)). +* Update Jobs GetRun API to support paginated responses ([#1132](https://github.com/databricks/databricks-sdk-go/pull/1132)). + +### API Changes +* Added `GetSpace` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. 
+* Added `ListProviderShareAssets` method for [w.Providers](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ProvidersAPI) workspace-level service. +* Added `BudgetPolicyId` and `EffectiveBudgetPolicyId` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). +* Added `Policy` field for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). +* Added `DatabricksGcpServiceAccount` field for [catalog.ValidateCredentialRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ValidateCredentialRequest). +* Added `AttachmentId` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment). +* Added `ConversationId` field for [dashboards.GenieConversation](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieConversation). +* Added `MessageId` field for [dashboards.GenieMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieMessage). +* Added `Description`, `Id`, `LastUpdatedTimestamp`, `Query`, `QueryResultMetadata` and `Title` fields for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). +* Added `GenAiComputeTask` field for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). +* Added `GenAiComputeTask` field for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). +* Added `GenAiComputeTask` field for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). +* Added `RunName` field for [ml.CreateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateRun). 
+* Added `RunName` field for [ml.RunInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInfo). +* Added `RunName` field for [ml.UpdateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#UpdateRun). +* Added `Lifetime` field for [oauth2.CreateServicePrincipalSecretRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretRequest). +* Added `ExpireTime` field for [oauth2.CreateServicePrincipalSecretResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretResponse). +* Added `ExpireTime` field for [oauth2.SecretInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#SecretInfo). +* Added `InstanceProfileArn` field for [serving.AmazonBedrockConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AmazonBedrockConfig). +* Added `Add`, `Principal` and `Remove` fields for [sharing.PermissionsChange](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#PermissionsChange). +* Added `ColumnsToRerank` field for [vectorsearch.QueryVectorIndexRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#QueryVectorIndexRequest). +* Added `Oracle` and `Teradata` enum values for [catalog.ConnectionType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ConnectionType). +* Added `FunctionArgumentsInvalidTypeException` and `MessageCancelledWhileExecutingException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). +* Added `Waiting` enum value for [jobs.RunLifecycleStateV2State](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunLifecycleStateV2State). 
+* Added `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* Added `OauthClientCredentials` enum value for [sharing.AuthenticationType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#AuthenticationType). +* Added `Raw` enum value for [workspace.ExportFormat](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/workspace#ExportFormat). +* [Breaking] Changed `GetByName` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service to return [ml.GetExperimentByNameResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#GetExperimentByNameResponse). +* [Breaking] Changed `LogInputs` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service with new required argument order. +* [Breaking] Changed `SharePermissions` and `UpdatePermissions` methods for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service return type to become non-empty. +* [Breaking] Changed `SharePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.GetSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#GetSharePermissionsResponse). +* [Breaking] Changed `UpdatePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.UpdateSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissionsResponse). 
+* Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. +* [Breaking] Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. +* [Breaking] Changed `Partitions` field for [cleanrooms.CleanRoomAssetTableLocalDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetTableLocalDetails) to type [cleanrooms.PartitionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#PartitionList). +* [Breaking] Changed `Query` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment) to type [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). +* Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. +* [Breaking] Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. +* [Breaking] Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. +* Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. +* Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. +* [Breaking] Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. 
+* [Breaking] Changed `ViewType` field for [ml.ListExperimentsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ListExperimentsRequest) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* [Breaking] Changed `RunId` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs) to be required. +* [Breaking] Changed `ViewType` field for [ml.SearchExperiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperiments) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* [Breaking] Changed `RunViewType` field for [ml.SearchRuns](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRuns) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). +* [Breaking] Removed `CustomTags` and `PolicyName` fields for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). +* [Breaking] Removed `CachedQuerySchema`, `Description`, `Id`, `InstructionId`, `InstructionTitle`, `LastUpdatedTimestamp`, `Query`, `StatementId` and `Title` fields for [dashboards.QueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryAttachment). +* [Breaking] Removed `MaxResults` and `PageToken` fields for [sharing.UpdateSharePermissions](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissions). +* [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchExperimentsViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperimentsViewType). 
+* [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchRunsRunViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRunsRunViewType). + + ## [Release] Release v0.58.1 ### Internal Changes diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 71462987c..f4b5104ec 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,74 +1,13 @@ # NEXT CHANGELOG -## Release v0.59.0 +## Release v0.60.0 ### New Features and Improvements ### Bug Fixes -* Fix unlikely issue due to conflicting error details in `APIError`. - ### Documentation ### Internal Changes -* Update Jobs ListRuns API to support paginated responses ([#1151](https://github.com/databricks/databricks-sdk-go/pull/1151)) -* Add `poll.SimpleError` to mock waiter objects returning errors ([#1155](https://github.com/databricks/databricks-sdk-go/pull/1155)) -* Refactor `APIError` to expose different types of error details ([#1153](https://github.com/databricks/databricks-sdk-go/pull/1153)). -* Update Jobs ListJobs API to support paginated responses ([#1150](https://github.com/databricks/databricks-sdk-go/pull/1150)) -* Introduce automated tagging ([#1148](https://github.com/databricks/databricks-sdk-go/pull/1148)). -* Update Jobs GetJob API to support paginated responses ([#1133](https://github.com/databricks/databricks-sdk-go/pull/1133)). -* Update Jobs GetRun API to support paginated responses ([#1132](https://github.com/databricks/databricks-sdk-go/pull/1132)). - ### API Changes -* Added `GetSpace` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. -* Added `ListProviderShareAssets` method for [w.Providers](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ProvidersAPI) workspace-level service. 
-* Added `BudgetPolicyId` and `EffectiveBudgetPolicyId` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). -* Added `Policy` field for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). -* Added `DatabricksGcpServiceAccount` field for [catalog.ValidateCredentialRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ValidateCredentialRequest). -* Added `AttachmentId` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment). -* Added `ConversationId` field for [dashboards.GenieConversation](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieConversation). -* Added `MessageId` field for [dashboards.GenieMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieMessage). -* Added `Description`, `Id`, `LastUpdatedTimestamp`, `Query`, `QueryResultMetadata` and `Title` fields for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). -* Added `GenAiComputeTask` field for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). -* Added `GenAiComputeTask` field for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). -* Added `GenAiComputeTask` field for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). -* Added `RunName` field for [ml.CreateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateRun). -* Added `RunName` field for [ml.RunInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInfo). 
-* Added `RunName` field for [ml.UpdateRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#UpdateRun). -* Added `Lifetime` field for [oauth2.CreateServicePrincipalSecretRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretRequest). -* Added `ExpireTime` field for [oauth2.CreateServicePrincipalSecretResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#CreateServicePrincipalSecretResponse). -* Added `ExpireTime` field for [oauth2.SecretInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#SecretInfo). -* Added `InstanceProfileArn` field for [serving.AmazonBedrockConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AmazonBedrockConfig). -* Added `Add`, `Principal` and `Remove` fields for [sharing.PermissionsChange](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#PermissionsChange). -* Added `ColumnsToRerank` field for [vectorsearch.QueryVectorIndexRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#QueryVectorIndexRequest). -* Added `Oracle` and `Teradata` enum values for [catalog.ConnectionType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ConnectionType). -* Added `FunctionArgumentsInvalidTypeException` and `MessageCancelledWhileExecutingException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). -* Added `Waiting` enum value for [jobs.RunLifecycleStateV2State](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunLifecycleStateV2State). -* Added `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). 
-* Added `OauthClientCredentials` enum value for [sharing.AuthenticationType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#AuthenticationType). -* Added `Raw` enum value for [workspace.ExportFormat](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/workspace#ExportFormat). -* [Breaking] Changed `GetByName` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service to return [ml.GetExperimentByNameResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#GetExperimentByNameResponse). -* [Breaking] Changed `LogInputs` method for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service with new required argument order. -* [Breaking] Changed `SharePermissions` and `UpdatePermissions` methods for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service return type to become non-empty. -* [Breaking] Changed `SharePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.GetSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#GetSharePermissionsResponse). -* [Breaking] Changed `UpdatePermissions` method for [w.Shares](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#SharesAPI) workspace-level service to return [sharing.UpdateSharePermissionsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissionsResponse). -* Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. 
-* [Breaking] Changed `PolicyId` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy) to no longer be required. -* [Breaking] Changed `Partitions` field for [cleanrooms.CleanRoomAssetTableLocalDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#CleanRoomAssetTableLocalDetails) to type [cleanrooms.PartitionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/cleanrooms#PartitionList). -* [Breaking] Changed `Query` field for [dashboards.GenieAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAttachment) to type [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). -* Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. -* [Breaking] Changed `Digest`, `Name`, `Source` and `SourceType` fields for [ml.Dataset](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Dataset) to be required. -* [Breaking] Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. -* Changed `Dataset` field for [ml.DatasetInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#DatasetInput) to be required. -* Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. -* [Breaking] Changed `Key` and `Value` fields for [ml.InputTag](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#InputTag) to be required. 
-* [Breaking] Changed `ViewType` field for [ml.ListExperimentsRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ListExperimentsRequest) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). -* [Breaking] Changed `RunId` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs) to be required. -* [Breaking] Changed `ViewType` field for [ml.SearchExperiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperiments) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). -* [Breaking] Changed `RunViewType` field for [ml.SearchRuns](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRuns) to type [ml.ViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ViewType). -* [Breaking] Removed `CustomTags` and `PolicyName` fields for [billing.CreateBudgetPolicyRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#CreateBudgetPolicyRequest). -* [Breaking] Removed `CachedQuerySchema`, `Description`, `Id`, `InstructionId`, `InstructionTitle`, `LastUpdatedTimestamp`, `Query`, `StatementId` and `Title` fields for [dashboards.QueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#QueryAttachment). -* [Breaking] Removed `MaxResults` and `PageToken` fields for [sharing.UpdateSharePermissions](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#UpdateSharePermissions). -* [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchExperimentsViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchExperimentsViewType). 
-* [Breaking] Removed `ActiveOnly`, `All` and `DeletedOnly` enum values for [ml.SearchRunsRunViewType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#SearchRunsRunViewType). From 2f2d945bb2c36583e2569d54eb7538981fa2aaf5 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Tue, 4 Mar 2025 17:33:44 +0100 Subject: [PATCH 25/54] [Fix] Properly parse the `RetryInfo` error detail (#1162) ## What changes are proposed in this pull request? This PR fixes unmarshalling of the `RetryInfo` error detail. Specifically, it properly unmarshal the underlying `google.protobuf.Duration` proto [as a string](https://github.com/protocolbuffers/protobuf/blob/main/src/google/protobuf/duration.proto#L92-L101). Note: we should ultimately consider using the actual proto generated objects instead of this custom code. ## How is this tested? Updated unit tests. NO_CHANGELOG=true --- apierr/details.go | 17 ++++++++++------- apierr/errors_test.go | 4 ++-- config/experimental/auth/dataplane/dataplane.go | 2 +- 3 files changed, 13 insertions(+), 10 deletions(-) diff --git a/apierr/details.go b/apierr/details.go index 1d7d5f1ab..5be9c5ea7 100644 --- a/apierr/details.go +++ b/apierr/details.go @@ -203,12 +203,11 @@ type requestInfoPb struct { // retryInfoPb is the wire-format representation of RetryInfo. It is used // internally to unmarshal RetryInfo from JSON. type retryInfoPb struct { - RetryDelay durationPb `json:"retry_delay"` -} - -type durationPb struct { - Seconds int64 `json:"seconds"` - Nanos int64 `json:"nanos"` + // The duration type is encoded as a string rather than an where the string + // ends in the suffix "s" (indicating seconds) and is preceded by a decimal + // number of seconds. For example, "3.000000001s", represents a duration of + // 3 seconds and 1 nanosecond. + RetryDelay string `json:"retry_delay"` } // debugInfoPb is the wire-format representation of DebugInfo. 
It is used @@ -340,7 +339,11 @@ func unmarshalDetails(d []byte) any { if err := json.Unmarshal(d, &pb); err != nil { return m // not a valid known type } - return RetryInfo{RetryDelay: time.Duration(pb.RetryDelay.Seconds)*time.Second + time.Duration(pb.RetryDelay.Nanos)*time.Nanosecond} + d, err := time.ParseDuration(pb.RetryDelay) + if err != nil { + return m // not a valid known type + } + return RetryInfo{RetryDelay: d} case debugInfoType: var pb debugInfoPb if err := json.Unmarshal(d, &pb); err != nil { diff --git a/apierr/errors_test.go b/apierr/errors_test.go index 61fa8d4fb..457882add 100644 --- a/apierr/errors_test.go +++ b/apierr/errors_test.go @@ -183,7 +183,7 @@ func TestGetAPIError(t *testing.T) { }, { "@type": "type.googleapis.com/google.rpc.RetryInfo", - "retry_delay": {"seconds": 1, "nanos": 1} + "retry_delay": "42.0000000012s" }, { "@type": "type.googleapis.com/google.rpc.DebugInfo", @@ -265,7 +265,7 @@ func TestGetAPIError(t *testing.T) { ServingData: "data", }, RetryInfo: &RetryInfo{ - RetryDelay: time.Second + time.Nanosecond, + RetryDelay: 42*time.Second + time.Nanosecond, }, DebugInfo: &DebugInfo{ StackEntries: []string{"entry1", "entry2"}, diff --git a/config/experimental/auth/dataplane/dataplane.go b/config/experimental/auth/dataplane/dataplane.go index 473032cf8..e9b943192 100644 --- a/config/experimental/auth/dataplane/dataplane.go +++ b/config/experimental/auth/dataplane/dataplane.go @@ -74,7 +74,7 @@ func (dpts *dataPlaneTokenSource) Token(ctx context.Context, endpoint string, au type tokenSource struct { client OAuthClient - cpts auth.TokenSource + cpts auth.TokenSource // authDetails string } From c33f4160577d613d0ace5ed09d1826422ec4c4a0 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Fri, 7 Mar 2025 14:18:06 +0100 Subject: [PATCH 26/54] Remove unnecessary config files and GitHub workflows (#1165) ## What changes are proposed in this pull request? 
Removes the following files: * `.codegen/changelog.md.tmpl`: The new automated tagging does not use this. API Changelogs are now generated at update time and use a separate template. * `.codegen/changelog_config.yml` and `.github/workflow/message.yml`: Changelogs are now manually written and we don't need to enforce the tags in the PR message. ## How is this tested? N/A ## Changelog Skip No user impact NO_CHANGELOG=true --- .codegen/changelog.md.tmpl | 91 ----------------------------------- .codegen/changelog_config.yml | 11 ----- .github/workflows/message.yml | 32 ------------ 3 files changed, 134 deletions(-) delete mode 100644 .codegen/changelog.md.tmpl delete mode 100644 .codegen/changelog_config.yml delete mode 100644 .github/workflows/message.yml diff --git a/.codegen/changelog.md.tmpl b/.codegen/changelog.md.tmpl deleted file mode 100644 index 748eabef6..000000000 --- a/.codegen/changelog.md.tmpl +++ /dev/null @@ -1,91 +0,0 @@ -# Version changelog - -## {{.Version}} -{{- range .GroupChanges}} - -### {{.Type.Message}} -{{range .Changes}} - * {{.}}. -{{- end}} -{{end}} -{{if .ApiChanges}} -### API Changes: -{{range .ApiChanges.GroupDiff}} - * {{.Action}} {{template "group-what" .}}{{if .Extra}} {{.Extra}}{{with .Other}} {{template "other-what" .}}{{end}}{{end}}. -{{- end}} - -OpenAPI SHA: {{.Sha}}, Date: {{.Changed}} -{{- end}}{{if .DependencyUpdates}} -Dependency updates: -{{range .DependencyUpdates}} - * {{.}}. 
-{{- end -}} -{{end}} - -## {{.PrevVersion}} - - -{{- define "group-what" -}} -{{if gt (len .Changes) 1 -}} {{template "single-what" .Changes.First}}{{end -}} -{{range .Changes.Middle -}}, {{template "single-what" .}}{{end -}} -{{if gt (len .Changes) 1}} and {{end}}{{template "single-what" .Changes.Last}}{{template "suffix-what" .}} -{{- end -}} - - -{{- define "single-what" -}} - {{if eq .X "package" -}} - [{{.Package.Name}}](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}) package - {{- else if eq .X "service" -}} - {{template "service" .Service}} - {{- else if eq .X "method" -}} - `{{.Method.PascalName}}` - {{- else if eq .X "entity" -}} - {{template "entity" .Entity}} - {{- else if eq .X "field" -}} - `{{.Field.PascalName}}` - {{- end}} -{{- end -}} - -{{- define "suffix-what" -}} - {{if eq .Type "method"}} method{{template "plural-suffix" .Changes}} for {{template "service" .Parent.Service}} - {{- else if eq .Type "field"}} field{{template "plural-suffix" .Changes}} for {{template "entity" .Parent.Entity}} - {{- end}} -{{- end -}} - -{{- define "plural-suffix" -}} -{{if gt (len .) 
1}}s{{end}} -{{- end -}} - -{{- define "other-what" -}} - {{if eq .X "package" -}} - [{{.Package.Name}}](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}) package - {{- else if eq .X "service" -}} - {{template "service" .Service}} - {{- else if eq .X "method" -}} - `{{.Method.PascalName}}` method for {{template "service" .Method.Service}} - {{- else if eq .X "entity" -}} - {{template "entity" .Entity}} - {{- else if eq .X "field" -}} - `{{.Field.PascalName}}` field for {{template "entity" .Field.Of}} - {{- end}} -{{- end -}} - -{{- define "service" -}} - [{{if .IsAccounts}}a{{else}}w{{end}}.{{.PascalName}}](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}#{{.PascalName}}API) {{if .IsAccounts}}account{{else}}workspace{{end}}-level service -{{- end -}} - -{{- define "entity" -}} - {{- if not . }}any /* ERROR */ - {{- else if .IsEmpty}}`any` - {{- else if .PascalName}}[{{.Package.Name}}.{{.PascalName}}](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}#{{.PascalName}}) - {{- else if .IsAny}}`any` - {{- else if .IsString}}`string` - {{- else if .IsBool}}`bool` - {{- else if .IsInt64}}`int64` - {{- else if .IsFloat64}}`float64` - {{- else if .IsInt}}`int` - {{- else if .ArrayValue }}[]{{template "entity" .ArrayValue}} - {{- else if .MapValue }}map[string]{{template "entity" .MapValue}} - {{- else}}[{{.Package.Name}}.{{.PascalName}}](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/{{.Package.Name}}#{{.PascalName}}) - {{- end -}} -{{- end -}} diff --git a/.codegen/changelog_config.yml b/.codegen/changelog_config.yml deleted file mode 100644 index c137c72bb..000000000 --- a/.codegen/changelog_config.yml +++ /dev/null @@ -1,11 +0,0 @@ -change_types: - - message: New Features and Improvements - tag: "[Feature]" - - message: Bug Fixes - tag: "[Fix]" - - message: Documentation - tag: "[Doc]" - - message: Internal Changes - tag: "[Internal]" - # Default 
for messages without a tag - - message: Other Changes \ No newline at end of file diff --git a/.github/workflows/message.yml b/.github/workflows/message.yml deleted file mode 100644 index c542c1df1..000000000 --- a/.github/workflows/message.yml +++ /dev/null @@ -1,32 +0,0 @@ -name: Validate Commit Message - -on: - pull_request: - types: [opened, synchronize, edited] - merge_group: - types: [checks_requested] - -jobs: - validate: - runs-on: ubuntu-latest - # GitHub required checks are shared between PRs and the Merge Queue. - # Since there is no PR title on Merge Queue, we need to trigger and - # skip this test for Merge Queue to succeed. - if: github.event_name == 'pull_request' - steps: - - name: Checkout - uses: actions/checkout@v3 - with: - fetch-depth: 0 - - - name: Validate Tag - env: - TITLE: ${{ github.event.pull_request.title }} - run: | - TAG=$(echo "$TITLE" | sed -ne 's/\[\(.*\)\].*/\1/p') - if grep -q "tag: \"\[$TAG\]\"" .codegen/changelog_config.yml; then - echo "Valid tag found: [$TAG]" - else - echo "Invalid or missing tag in commit message: [$TAG]" - exit 1 - fi \ No newline at end of file From 19e0348fb08bb784b06e8f18ab4b01286f24df38 Mon Sep 17 00:00:00 2001 From: Parth Bansal Date: Tue, 11 Mar 2025 15:17:52 +0100 Subject: [PATCH 27/54] Update OpenAPI spec and Update mockery version (#1168) ## What changes are proposed in this pull request? This PR updates SDK to the latest OpenAPI spec. This PR also updates the mockery version as the currently used version is not compatible with go 1.24.0. ## How is this tested? 
Existing Tests --- .codegen.json | 2 +- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + NEXT_CHANGELOG.md | 7 + experimental/mocks/mock_workspace_client.go | 9 + .../mocks/service/apps/mock_apps_interface.go | 2 +- .../billing/mock_billable_usage_interface.go | 2 +- .../billing/mock_budget_policy_interface.go | 2 +- .../service/billing/mock_budgets_interface.go | 2 +- .../billing/mock_log_delivery_interface.go | 2 +- .../mock_usage_dashboards_interface.go | 2 +- ...account_metastore_assignments_interface.go | 2 +- .../mock_account_metastores_interface.go | 2 +- ...k_account_storage_credentials_interface.go | 2 +- .../mock_artifact_allowlists_interface.go | 2 +- .../catalog/mock_catalogs_interface.go | 2 +- .../catalog/mock_connections_interface.go | 2 +- .../catalog/mock_credentials_interface.go | 2 +- .../mock_external_locations_interface.go | 2 +- .../catalog/mock_functions_interface.go | 2 +- .../service/catalog/mock_grants_interface.go | 2 +- .../catalog/mock_metastores_interface.go | 2 +- .../catalog/mock_model_versions_interface.go | 2 +- .../catalog/mock_online_tables_interface.go | 2 +- .../mock_quality_monitors_interface.go | 2 +- .../mock_registered_models_interface.go | 2 +- .../catalog/mock_resource_quotas_interface.go | 2 +- .../service/catalog/mock_schemas_interface.go | 2 +- .../mock_storage_credentials_interface.go | 2 +- .../catalog/mock_system_schemas_interface.go | 2 +- .../mock_table_constraints_interface.go | 2 +- .../service/catalog/mock_tables_interface.go | 2 +- ...k_temporary_table_credentials_interface.go | 2 +- .../service/catalog/mock_volumes_interface.go | 2 +- .../mock_workspace_bindings_interface.go | 2 +- .../mock_clean_room_assets_interface.go | 2 +- .../mock_clean_room_task_runs_interface.go | 2 +- .../cleanrooms/mock_clean_rooms_interface.go | 2 +- .../mock_cluster_policies_interface.go | 2 +- .../compute/mock_clusters_interface.go | 2 +- .../mock_command_execution_interface.go | 2 +- .../mock_global_init_scripts_interface.go | 
2 +- .../compute/mock_instance_pools_interface.go | 2 +- .../mock_instance_profiles_interface.go | 2 +- .../compute/mock_libraries_interface.go | 2 +- ...olicy_compliance_for_clusters_interface.go | 2 +- .../compute/mock_policy_families_interface.go | 2 +- .../dashboards/mock_genie_interface.go | 182 ++++++++- .../mock_lakeview_embedded_interface.go | 2 +- .../dashboards/mock_lakeview_interface.go | 2 +- .../mock_query_execution_interface.go | 2 +- .../service/files/mock_dbfs_interface.go | 2 +- .../service/files/mock_files_interface.go | 2 +- .../iam/mock_access_control_interface.go | 2 +- .../mock_account_access_control_interface.go | 2 +- ..._account_access_control_proxy_interface.go | 2 +- .../iam/mock_account_groups_interface.go | 2 +- ...ck_account_service_principals_interface.go | 2 +- .../iam/mock_account_users_interface.go | 2 +- .../iam/mock_current_user_interface.go | 2 +- .../service/iam/mock_groups_interface.go | 2 +- .../mock_permission_migration_interface.go | 2 +- .../service/iam/mock_permissions_interface.go | 2 +- .../iam/mock_service_principals_interface.go | 2 +- .../mocks/service/iam/mock_users_interface.go | 2 +- .../mock_workspace_assignment_interface.go | 2 +- .../mocks/service/jobs/mock_jobs_interface.go | 2 +- ...ck_policy_compliance_for_jobs_interface.go | 2 +- .../mock_consumer_fulfillments_interface.go | 2 +- .../mock_consumer_installations_interface.go | 2 +- .../mock_consumer_listings_interface.go | 2 +- ...umer_personalization_requests_interface.go | 2 +- .../mock_consumer_providers_interface.go | 2 +- ...ock_provider_exchange_filters_interface.go | 2 +- .../mock_provider_exchanges_interface.go | 2 +- .../mock_provider_files_interface.go | 2 +- .../mock_provider_listings_interface.go | 2 +- ...ider_personalization_requests_interface.go | 2 +- ...provider_analytics_dashboards_interface.go | 2 +- .../mock_provider_providers_interface.go | 2 +- .../service/ml/mock_experiments_interface.go | 2 +- 
.../service/ml/mock_forecasting_interface.go | 353 ++++++++++++++++++ .../ml/mock_model_registry_interface.go | 2 +- ...ock_account_federation_policy_interface.go | 2 +- .../mock_custom_app_integration_interface.go | 2 +- .../mock_o_auth_published_apps_interface.go | 2 +- ...ock_published_app_integration_interface.go | 2 +- ...e_principal_federation_policy_interface.go | 2 +- ...ock_service_principal_secrets_interface.go | 2 +- .../pipelines/mock_pipelines_interface.go | 2 +- .../mock_credentials_interface.go | 2 +- .../mock_encryption_keys_interface.go | 2 +- .../provisioning/mock_networks_interface.go | 2 +- .../mock_private_access_interface.go | 2 +- .../provisioning/mock_storage_interface.go | 2 +- .../mock_vpc_endpoints_interface.go | 2 +- .../provisioning/mock_workspaces_interface.go | 2 +- ..._serving_endpoints_data_plane_interface.go | 2 +- .../mock_serving_endpoints_interface.go | 2 +- .../mock_account_ip_access_lists_interface.go | 2 +- .../mock_account_settings_interface.go | 12 +- ...board_embedding_access_policy_interface.go | 2 +- ...rd_embedding_approved_domains_interface.go | 2 +- ...mock_automatic_cluster_update_interface.go | 2 +- ...k_compliance_security_profile_interface.go | 2 +- .../mock_credentials_manager_interface.go | 2 +- .../mock_csp_enablement_account_interface.go | 2 +- .../mock_default_namespace_interface.go | 2 +- .../mock_disable_legacy_access_interface.go | 2 +- .../mock_disable_legacy_dbfs_interface.go | 2 +- .../mock_disable_legacy_features_interface.go | 2 +- .../mock_enable_ip_access_lists_interface.go | 2 +- ..._enhanced_security_monitoring_interface.go | 2 +- .../mock_esm_enablement_account_interface.go | 2 +- .../mock_ip_access_lists_interface.go | 2 +- .../mock_network_connectivity_interface.go | 2 +- ...ock_notification_destinations_interface.go | 2 +- .../mock_personal_compute_interface.go | 2 +- ...ock_restrict_workspace_admins_interface.go | 2 +- .../settings/mock_settings_interface.go | 20 +- 
.../mock_token_management_interface.go | 2 +- .../service/settings/mock_tokens_interface.go | 2 +- .../settings/mock_workspace_conf_interface.go | 2 +- .../sharing/mock_providers_interface.go | 2 +- .../mock_recipient_activation_interface.go | 2 +- .../sharing/mock_recipients_interface.go | 2 +- .../service/sharing/mock_shares_interface.go | 2 +- .../service/sql/mock_alerts_interface.go | 2 +- .../sql/mock_alerts_legacy_interface.go | 2 +- .../sql/mock_dashboard_widgets_interface.go | 2 +- .../service/sql/mock_dashboards_interface.go | 2 +- .../sql/mock_data_sources_interface.go | 2 +- .../sql/mock_dbsql_permissions_interface.go | 2 +- .../service/sql/mock_queries_interface.go | 2 +- .../sql/mock_queries_legacy_interface.go | 2 +- .../sql/mock_query_history_interface.go | 2 +- .../mock_query_visualizations_interface.go | 2 +- ...k_query_visualizations_legacy_interface.go | 2 +- .../sql/mock_redash_config_interface.go | 2 +- .../sql/mock_statement_execution_interface.go | 2 +- .../service/sql/mock_warehouses_interface.go | 2 +- .../mock_vector_search_endpoints_interface.go | 2 +- .../mock_vector_search_indexes_interface.go | 2 +- .../mock_git_credentials_interface.go | 2 +- .../service/workspace/mock_repos_interface.go | 2 +- .../workspace/mock_secrets_interface.go | 2 +- .../workspace/mock_workspace_interface.go | 2 +- service/compute/impl.go | 2 + service/compute/model.go | 62 +-- service/dashboards/api.go | 54 ++- service/dashboards/impl.go | 20 + service/dashboards/interface.go | 21 +- service/dashboards/model.go | 87 +++-- service/files/impl.go | 3 +- service/files/model.go | 6 +- service/jobs/impl.go | 38 +- service/marketplace/model.go | 6 +- service/ml/api.go | 159 +++++++- service/ml/impl.go | 26 ++ service/ml/interface.go | 15 + service/ml/model.go | 150 ++++++++ service/oauth2/model.go | 9 +- service/pkg.go | 9 +- service/serving/impl.go | 1 - service/serving/model.go | 6 + service/settings/impl.go | 7 - service/sharing/impl.go | 3 - 
service/vectorsearch/model.go | 1 + tagging.py | 7 +- workspace_client.go | 5 + 170 files changed, 1291 insertions(+), 270 deletions(-) create mode 100644 experimental/mocks/service/ml/mock_forecasting_interface.go diff --git a/.codegen.json b/.codegen.json index 9da4ed72d..f9c316f89 100644 --- a/.codegen.json +++ b/.codegen.json @@ -10,7 +10,7 @@ ], "post_generate": [ "make fmt", - "go run github.com/vektra/mockery/v2@0229bd4bb4357cd09af7ac15ecab85022d296b12" + "go run github.com/vektra/mockery/v2@bfd46e35b15c2689ced221299bdcdeeff8aa0be3" ] } } \ No newline at end of file diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 02c4790ad..a7b80d538 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -e5c870006a536121442cfd2441bdc8a5fb76ae1e \ No newline at end of file +cd641c9dd4febe334b339dd7878d099dcf0eeab5 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index d2da9a3e1..da04156a9 100644 --- a/.gitattributes +++ b/.gitattributes @@ -80,6 +80,7 @@ experimental/mocks/service/marketplace/mock_provider_personalization_requests_in experimental/mocks/service/marketplace/mock_provider_provider_analytics_dashboards_interface.go linguist-generated=true experimental/mocks/service/marketplace/mock_provider_providers_interface.go linguist-generated=true experimental/mocks/service/ml/mock_experiments_interface.go linguist-generated=true +experimental/mocks/service/ml/mock_forecasting_interface.go linguist-generated=true experimental/mocks/service/ml/mock_model_registry_interface.go linguist-generated=true experimental/mocks/service/oauth2/mock_account_federation_policy_interface.go linguist-generated=true experimental/mocks/service/oauth2/mock_custom_app_integration_interface.go linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index f4b5104ec..15fdcd0ce 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,3 +11,10 @@ ### Internal Changes ### API Changes +* Added 
[w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service. +* Added `ExecuteMessageAttachmentQuery` and `GetMessageAttachmentQueryResult` methods for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Added `StatementId` field for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). +* Added `BudgetPolicyId` field for [serving.CreateServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#CreateServingEndpoint). +* Added `BudgetPolicyId` field for [serving.ServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpoint). +* Added `BudgetPolicyId` field for [serving.ServingEndpointDetailed](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointDetailed). +* Added `CouldNotGetModelDeploymentsException` enum value for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). 
diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index ca2bffb53..b52b79a32 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -139,6 +139,7 @@ func NewMockWorkspaceClient(t interface { Workspace: workspace.NewMockWorkspaceInterface(t), WorkspaceBindings: catalog.NewMockWorkspaceBindingsInterface(t), WorkspaceConf: settings.NewMockWorkspaceConfInterface(t), + Forecasting: ml.NewMockForecastingInterface(t), }, } @@ -1029,3 +1030,11 @@ func (m *MockWorkspaceClient) GetMockWorkspaceConfAPI() *settings.MockWorkspaceC } return api } + +func (m *MockWorkspaceClient) GetMockForecastingAPI() *ml.MockForecastingInterface { + api, ok := m.WorkspaceClient.Forecasting.(*ml.MockForecastingInterface) + if !ok { + panic(fmt.Sprintf("expected Forecasting to be *ml.MockForecastingInterface, actual was %T", m.WorkspaceClient.Forecasting)) + } + return api +} diff --git a/experimental/mocks/service/apps/mock_apps_interface.go b/experimental/mocks/service/apps/mock_apps_interface.go index 2b91a11b2..30200cabb 100644 --- a/experimental/mocks/service/apps/mock_apps_interface.go +++ b/experimental/mocks/service/apps/mock_apps_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package apps diff --git a/experimental/mocks/service/billing/mock_billable_usage_interface.go b/experimental/mocks/service/billing/mock_billable_usage_interface.go index 90c5b2d79..86220e305 100644 --- a/experimental/mocks/service/billing/mock_billable_usage_interface.go +++ b/experimental/mocks/service/billing/mock_billable_usage_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package billing diff --git a/experimental/mocks/service/billing/mock_budget_policy_interface.go b/experimental/mocks/service/billing/mock_budget_policy_interface.go index 7028b86fc..128dd38e5 100644 --- a/experimental/mocks/service/billing/mock_budget_policy_interface.go +++ b/experimental/mocks/service/billing/mock_budget_policy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package billing diff --git a/experimental/mocks/service/billing/mock_budgets_interface.go b/experimental/mocks/service/billing/mock_budgets_interface.go index 59256ce10..3a1d9dfaf 100644 --- a/experimental/mocks/service/billing/mock_budgets_interface.go +++ b/experimental/mocks/service/billing/mock_budgets_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package billing diff --git a/experimental/mocks/service/billing/mock_log_delivery_interface.go b/experimental/mocks/service/billing/mock_log_delivery_interface.go index c163a9c26..04e562785 100644 --- a/experimental/mocks/service/billing/mock_log_delivery_interface.go +++ b/experimental/mocks/service/billing/mock_log_delivery_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package billing diff --git a/experimental/mocks/service/billing/mock_usage_dashboards_interface.go b/experimental/mocks/service/billing/mock_usage_dashboards_interface.go index ee0e87850..95ac282b4 100644 --- a/experimental/mocks/service/billing/mock_usage_dashboards_interface.go +++ b/experimental/mocks/service/billing/mock_usage_dashboards_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package billing diff --git a/experimental/mocks/service/catalog/mock_account_metastore_assignments_interface.go b/experimental/mocks/service/catalog/mock_account_metastore_assignments_interface.go index 968ac6044..8ca65ca32 100644 --- a/experimental/mocks/service/catalog/mock_account_metastore_assignments_interface.go +++ b/experimental/mocks/service/catalog/mock_account_metastore_assignments_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_account_metastores_interface.go b/experimental/mocks/service/catalog/mock_account_metastores_interface.go index dd4105954..e1a5026bc 100644 --- a/experimental/mocks/service/catalog/mock_account_metastores_interface.go +++ b/experimental/mocks/service/catalog/mock_account_metastores_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_account_storage_credentials_interface.go b/experimental/mocks/service/catalog/mock_account_storage_credentials_interface.go index 2646d9cc2..9fdb50ea4 100644 --- a/experimental/mocks/service/catalog/mock_account_storage_credentials_interface.go +++ b/experimental/mocks/service/catalog/mock_account_storage_credentials_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_artifact_allowlists_interface.go b/experimental/mocks/service/catalog/mock_artifact_allowlists_interface.go index 72308e190..2798fe50a 100644 --- a/experimental/mocks/service/catalog/mock_artifact_allowlists_interface.go +++ b/experimental/mocks/service/catalog/mock_artifact_allowlists_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. 
+// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_catalogs_interface.go b/experimental/mocks/service/catalog/mock_catalogs_interface.go index 1d5a4665e..1af035c79 100644 --- a/experimental/mocks/service/catalog/mock_catalogs_interface.go +++ b/experimental/mocks/service/catalog/mock_catalogs_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_connections_interface.go b/experimental/mocks/service/catalog/mock_connections_interface.go index a9558ae5e..3f7deef26 100644 --- a/experimental/mocks/service/catalog/mock_connections_interface.go +++ b/experimental/mocks/service/catalog/mock_connections_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_credentials_interface.go b/experimental/mocks/service/catalog/mock_credentials_interface.go index 3fea48ac7..c4bc1a6eb 100644 --- a/experimental/mocks/service/catalog/mock_credentials_interface.go +++ b/experimental/mocks/service/catalog/mock_credentials_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_external_locations_interface.go b/experimental/mocks/service/catalog/mock_external_locations_interface.go index cc4d423c4..39ef7fef9 100644 --- a/experimental/mocks/service/catalog/mock_external_locations_interface.go +++ b/experimental/mocks/service/catalog/mock_external_locations_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package catalog diff --git a/experimental/mocks/service/catalog/mock_functions_interface.go b/experimental/mocks/service/catalog/mock_functions_interface.go index 1fdd8664b..d395a176a 100644 --- a/experimental/mocks/service/catalog/mock_functions_interface.go +++ b/experimental/mocks/service/catalog/mock_functions_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_grants_interface.go b/experimental/mocks/service/catalog/mock_grants_interface.go index f234ab21c..348f0d7ff 100644 --- a/experimental/mocks/service/catalog/mock_grants_interface.go +++ b/experimental/mocks/service/catalog/mock_grants_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_metastores_interface.go b/experimental/mocks/service/catalog/mock_metastores_interface.go index 2a347cecc..2f1437bc4 100644 --- a/experimental/mocks/service/catalog/mock_metastores_interface.go +++ b/experimental/mocks/service/catalog/mock_metastores_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_model_versions_interface.go b/experimental/mocks/service/catalog/mock_model_versions_interface.go index 99e498e21..210ec9623 100644 --- a/experimental/mocks/service/catalog/mock_model_versions_interface.go +++ b/experimental/mocks/service/catalog/mock_model_versions_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package catalog diff --git a/experimental/mocks/service/catalog/mock_online_tables_interface.go b/experimental/mocks/service/catalog/mock_online_tables_interface.go index d49243937..d765ff6fc 100644 --- a/experimental/mocks/service/catalog/mock_online_tables_interface.go +++ b/experimental/mocks/service/catalog/mock_online_tables_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_quality_monitors_interface.go b/experimental/mocks/service/catalog/mock_quality_monitors_interface.go index bd884c703..b17784933 100644 --- a/experimental/mocks/service/catalog/mock_quality_monitors_interface.go +++ b/experimental/mocks/service/catalog/mock_quality_monitors_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_registered_models_interface.go b/experimental/mocks/service/catalog/mock_registered_models_interface.go index 025d71d85..c090f08b0 100644 --- a/experimental/mocks/service/catalog/mock_registered_models_interface.go +++ b/experimental/mocks/service/catalog/mock_registered_models_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_resource_quotas_interface.go b/experimental/mocks/service/catalog/mock_resource_quotas_interface.go index b25aab962..b0a82cf30 100644 --- a/experimental/mocks/service/catalog/mock_resource_quotas_interface.go +++ b/experimental/mocks/service/catalog/mock_resource_quotas_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package catalog diff --git a/experimental/mocks/service/catalog/mock_schemas_interface.go b/experimental/mocks/service/catalog/mock_schemas_interface.go index 4bad64dbd..b870b2fb9 100644 --- a/experimental/mocks/service/catalog/mock_schemas_interface.go +++ b/experimental/mocks/service/catalog/mock_schemas_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_storage_credentials_interface.go b/experimental/mocks/service/catalog/mock_storage_credentials_interface.go index ee4fafbc4..cb1bdd3a6 100644 --- a/experimental/mocks/service/catalog/mock_storage_credentials_interface.go +++ b/experimental/mocks/service/catalog/mock_storage_credentials_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_system_schemas_interface.go b/experimental/mocks/service/catalog/mock_system_schemas_interface.go index be608eebc..f05428343 100644 --- a/experimental/mocks/service/catalog/mock_system_schemas_interface.go +++ b/experimental/mocks/service/catalog/mock_system_schemas_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_table_constraints_interface.go b/experimental/mocks/service/catalog/mock_table_constraints_interface.go index 73dfb0d2c..9755985e4 100644 --- a/experimental/mocks/service/catalog/mock_table_constraints_interface.go +++ b/experimental/mocks/service/catalog/mock_table_constraints_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package catalog diff --git a/experimental/mocks/service/catalog/mock_tables_interface.go b/experimental/mocks/service/catalog/mock_tables_interface.go index d0da4a5c9..0f921c171 100644 --- a/experimental/mocks/service/catalog/mock_tables_interface.go +++ b/experimental/mocks/service/catalog/mock_tables_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_temporary_table_credentials_interface.go b/experimental/mocks/service/catalog/mock_temporary_table_credentials_interface.go index 2948fe612..8504bda52 100644 --- a/experimental/mocks/service/catalog/mock_temporary_table_credentials_interface.go +++ b/experimental/mocks/service/catalog/mock_temporary_table_credentials_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_volumes_interface.go b/experimental/mocks/service/catalog/mock_volumes_interface.go index b4a6bdd0c..976b9c631 100644 --- a/experimental/mocks/service/catalog/mock_volumes_interface.go +++ b/experimental/mocks/service/catalog/mock_volumes_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package catalog diff --git a/experimental/mocks/service/catalog/mock_workspace_bindings_interface.go b/experimental/mocks/service/catalog/mock_workspace_bindings_interface.go index 1bce0578f..8f2306efd 100644 --- a/experimental/mocks/service/catalog/mock_workspace_bindings_interface.go +++ b/experimental/mocks/service/catalog/mock_workspace_bindings_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package catalog diff --git a/experimental/mocks/service/cleanrooms/mock_clean_room_assets_interface.go b/experimental/mocks/service/cleanrooms/mock_clean_room_assets_interface.go index 05bbc25a7..fbcda4b72 100644 --- a/experimental/mocks/service/cleanrooms/mock_clean_room_assets_interface.go +++ b/experimental/mocks/service/cleanrooms/mock_clean_room_assets_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package cleanrooms diff --git a/experimental/mocks/service/cleanrooms/mock_clean_room_task_runs_interface.go b/experimental/mocks/service/cleanrooms/mock_clean_room_task_runs_interface.go index 1e052c3cd..6a64f5334 100644 --- a/experimental/mocks/service/cleanrooms/mock_clean_room_task_runs_interface.go +++ b/experimental/mocks/service/cleanrooms/mock_clean_room_task_runs_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package cleanrooms diff --git a/experimental/mocks/service/cleanrooms/mock_clean_rooms_interface.go b/experimental/mocks/service/cleanrooms/mock_clean_rooms_interface.go index 0a7271e1b..387fa87a8 100644 --- a/experimental/mocks/service/cleanrooms/mock_clean_rooms_interface.go +++ b/experimental/mocks/service/cleanrooms/mock_clean_rooms_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package cleanrooms diff --git a/experimental/mocks/service/compute/mock_cluster_policies_interface.go b/experimental/mocks/service/compute/mock_cluster_policies_interface.go index 869afe49e..687c8b78a 100644 --- a/experimental/mocks/service/compute/mock_cluster_policies_interface.go +++ b/experimental/mocks/service/compute/mock_cluster_policies_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package compute diff --git a/experimental/mocks/service/compute/mock_clusters_interface.go b/experimental/mocks/service/compute/mock_clusters_interface.go index be48ba31e..7e9f2c137 100644 --- a/experimental/mocks/service/compute/mock_clusters_interface.go +++ b/experimental/mocks/service/compute/mock_clusters_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package compute diff --git a/experimental/mocks/service/compute/mock_command_execution_interface.go b/experimental/mocks/service/compute/mock_command_execution_interface.go index f1908b864..85e261500 100644 --- a/experimental/mocks/service/compute/mock_command_execution_interface.go +++ b/experimental/mocks/service/compute/mock_command_execution_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package compute diff --git a/experimental/mocks/service/compute/mock_global_init_scripts_interface.go b/experimental/mocks/service/compute/mock_global_init_scripts_interface.go index d57c005c1..ef7c64801 100644 --- a/experimental/mocks/service/compute/mock_global_init_scripts_interface.go +++ b/experimental/mocks/service/compute/mock_global_init_scripts_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package compute diff --git a/experimental/mocks/service/compute/mock_instance_pools_interface.go b/experimental/mocks/service/compute/mock_instance_pools_interface.go index d81eb239f..19ad04d20 100644 --- a/experimental/mocks/service/compute/mock_instance_pools_interface.go +++ b/experimental/mocks/service/compute/mock_instance_pools_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package compute diff --git a/experimental/mocks/service/compute/mock_instance_profiles_interface.go b/experimental/mocks/service/compute/mock_instance_profiles_interface.go index 4b99f32bf..fdace237b 100644 --- a/experimental/mocks/service/compute/mock_instance_profiles_interface.go +++ b/experimental/mocks/service/compute/mock_instance_profiles_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package compute diff --git a/experimental/mocks/service/compute/mock_libraries_interface.go b/experimental/mocks/service/compute/mock_libraries_interface.go index 5f9a8696a..23304e2d5 100644 --- a/experimental/mocks/service/compute/mock_libraries_interface.go +++ b/experimental/mocks/service/compute/mock_libraries_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package compute diff --git a/experimental/mocks/service/compute/mock_policy_compliance_for_clusters_interface.go b/experimental/mocks/service/compute/mock_policy_compliance_for_clusters_interface.go index 60ebb07c6..7bc27134e 100644 --- a/experimental/mocks/service/compute/mock_policy_compliance_for_clusters_interface.go +++ b/experimental/mocks/service/compute/mock_policy_compliance_for_clusters_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package compute diff --git a/experimental/mocks/service/compute/mock_policy_families_interface.go b/experimental/mocks/service/compute/mock_policy_families_interface.go index 8062c31d7..55b041074 100644 --- a/experimental/mocks/service/compute/mock_policy_families_interface.go +++ b/experimental/mocks/service/compute/mock_policy_families_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package compute diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go index fef6abd67..f3e485435 100644 --- a/experimental/mocks/service/dashboards/mock_genie_interface.go +++ b/experimental/mocks/service/dashboards/mock_genie_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package dashboards @@ -159,6 +159,65 @@ func (_c *MockGenieInterface_CreateMessageAndWait_Call) RunAndReturn(run func(co return _c } +// ExecuteMessageAttachmentQuery provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) ExecuteMessageAttachmentQuery(ctx context.Context, request dashboards.GenieExecuteMessageAttachmentQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ExecuteMessageAttachmentQuery") + } + + var r0 *dashboards.GenieGetMessageQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieExecuteMessageAttachmentQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieExecuteMessageAttachmentQueryRequest) *dashboards.GenieGetMessageQueryResultResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieGetMessageQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieExecuteMessageAttachmentQueryRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_ExecuteMessageAttachmentQuery_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ExecuteMessageAttachmentQuery' +type MockGenieInterface_ExecuteMessageAttachmentQuery_Call 
struct { + *mock.Call +} + +// ExecuteMessageAttachmentQuery is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GenieExecuteMessageAttachmentQueryRequest +func (_e *MockGenieInterface_Expecter) ExecuteMessageAttachmentQuery(ctx interface{}, request interface{}) *MockGenieInterface_ExecuteMessageAttachmentQuery_Call { + return &MockGenieInterface_ExecuteMessageAttachmentQuery_Call{Call: _e.mock.On("ExecuteMessageAttachmentQuery", ctx, request)} +} + +func (_c *MockGenieInterface_ExecuteMessageAttachmentQuery_Call) Run(run func(ctx context.Context, request dashboards.GenieExecuteMessageAttachmentQueryRequest)) *MockGenieInterface_ExecuteMessageAttachmentQuery_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieExecuteMessageAttachmentQueryRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_ExecuteMessageAttachmentQuery_Call) Return(_a0 *dashboards.GenieGetMessageQueryResultResponse, _a1 error) *MockGenieInterface_ExecuteMessageAttachmentQuery_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_ExecuteMessageAttachmentQuery_Call) RunAndReturn(run func(context.Context, dashboards.GenieExecuteMessageAttachmentQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)) *MockGenieInterface_ExecuteMessageAttachmentQuery_Call { + _c.Call.Return(run) + return _c +} + // ExecuteMessageQuery provides a mock function with given fields: ctx, request func (_m *MockGenieInterface) ExecuteMessageQuery(ctx context.Context, request dashboards.GenieExecuteMessageQueryRequest) (*dashboards.GenieGetMessageQueryResultResponse, error) { ret := _m.Called(ctx, request) @@ -277,6 +336,127 @@ func (_c *MockGenieInterface_GetMessage_Call) RunAndReturn(run func(context.Cont return _c } +// GetMessageAttachmentQueryResult provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) GetMessageAttachmentQueryResult(ctx 
context.Context, request dashboards.GenieGetMessageAttachmentQueryResultRequest) (*dashboards.GenieGetMessageQueryResultResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetMessageAttachmentQueryResult") + } + + var r0 *dashboards.GenieGetMessageQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetMessageAttachmentQueryResultRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetMessageAttachmentQueryResultRequest) *dashboards.GenieGetMessageQueryResultResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieGetMessageQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieGetMessageAttachmentQueryResultRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetMessageAttachmentQueryResult_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMessageAttachmentQueryResult' +type MockGenieInterface_GetMessageAttachmentQueryResult_Call struct { + *mock.Call +} + +// GetMessageAttachmentQueryResult is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GenieGetMessageAttachmentQueryResultRequest +func (_e *MockGenieInterface_Expecter) GetMessageAttachmentQueryResult(ctx interface{}, request interface{}) *MockGenieInterface_GetMessageAttachmentQueryResult_Call { + return &MockGenieInterface_GetMessageAttachmentQueryResult_Call{Call: _e.mock.On("GetMessageAttachmentQueryResult", ctx, request)} +} + +func (_c *MockGenieInterface_GetMessageAttachmentQueryResult_Call) Run(run func(ctx context.Context, request dashboards.GenieGetMessageAttachmentQueryResultRequest)) 
*MockGenieInterface_GetMessageAttachmentQueryResult_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieGetMessageAttachmentQueryResultRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_GetMessageAttachmentQueryResult_Call) Return(_a0 *dashboards.GenieGetMessageQueryResultResponse, _a1 error) *MockGenieInterface_GetMessageAttachmentQueryResult_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetMessageAttachmentQueryResult_Call) RunAndReturn(run func(context.Context, dashboards.GenieGetMessageAttachmentQueryResultRequest) (*dashboards.GenieGetMessageQueryResultResponse, error)) *MockGenieInterface_GetMessageAttachmentQueryResult_Call { + _c.Call.Return(run) + return _c +} + +// GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId provides a mock function with given fields: ctx, spaceId, conversationId, messageId, attachmentId +func (_m *MockGenieInterface) GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*dashboards.GenieGetMessageQueryResultResponse, error) { + ret := _m.Called(ctx, spaceId, conversationId, messageId, attachmentId) + + if len(ret) == 0 { + panic("no return value specified for GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId") + } + + var r0 *dashboards.GenieGetMessageQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) (*dashboards.GenieGetMessageQueryResultResponse, error)); ok { + return rf(ctx, spaceId, conversationId, messageId, attachmentId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string) *dashboards.GenieGetMessageQueryResultResponse); ok { + r0 = rf(ctx, spaceId, conversationId, messageId, attachmentId) + } else { + if ret.Get(0) != nil { + r0 = 
ret.Get(0).(*dashboards.GenieGetMessageQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string) error); ok { + r1 = rf(ctx, spaceId, conversationId, messageId, attachmentId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId' +type MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call struct { + *mock.Call +} + +// GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId is a helper method to define mock.On call +// - ctx context.Context +// - spaceId string +// - conversationId string +// - messageId string +// - attachmentId string +func (_e *MockGenieInterface_Expecter) GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx interface{}, spaceId interface{}, conversationId interface{}, messageId interface{}, attachmentId interface{}) *MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + return &MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call{Call: _e.mock.On("GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId", ctx, spaceId, conversationId, messageId, attachmentId)} +} + +func (_c *MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call) Run(run func(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string)) *MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + _c.Call.Run(func(args mock.Arguments) { + 
run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string)) + }) + return _c +} + +func (_c *MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call) Return(_a0 *dashboards.GenieGetMessageQueryResultResponse, _a1 error) *MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call) RunAndReturn(run func(context.Context, string, string, string, string) (*dashboards.GenieGetMessageQueryResultResponse, error)) *MockGenieInterface_GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId_Call { + _c.Call.Return(run) + return _c +} + // GetMessageBySpaceIdAndConversationIdAndMessageId provides a mock function with given fields: ctx, spaceId, conversationId, messageId func (_m *MockGenieInterface) GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*dashboards.GenieMessage, error) { ret := _m.Called(ctx, spaceId, conversationId, messageId) diff --git a/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go index d606527b0..4e2cc2b04 100644 --- a/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go +++ b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package dashboards diff --git a/experimental/mocks/service/dashboards/mock_lakeview_interface.go b/experimental/mocks/service/dashboards/mock_lakeview_interface.go index 0e18d5903..0dd34198e 100644 --- a/experimental/mocks/service/dashboards/mock_lakeview_interface.go +++ b/experimental/mocks/service/dashboards/mock_lakeview_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package dashboards diff --git a/experimental/mocks/service/dashboards/mock_query_execution_interface.go b/experimental/mocks/service/dashboards/mock_query_execution_interface.go index e3c3b6e90..ead40a712 100644 --- a/experimental/mocks/service/dashboards/mock_query_execution_interface.go +++ b/experimental/mocks/service/dashboards/mock_query_execution_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package dashboards diff --git a/experimental/mocks/service/files/mock_dbfs_interface.go b/experimental/mocks/service/files/mock_dbfs_interface.go index f2494b722..6406d2571 100644 --- a/experimental/mocks/service/files/mock_dbfs_interface.go +++ b/experimental/mocks/service/files/mock_dbfs_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package files diff --git a/experimental/mocks/service/files/mock_files_interface.go b/experimental/mocks/service/files/mock_files_interface.go index dfc7c45dd..462899102 100644 --- a/experimental/mocks/service/files/mock_files_interface.go +++ b/experimental/mocks/service/files/mock_files_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package files diff --git a/experimental/mocks/service/iam/mock_access_control_interface.go b/experimental/mocks/service/iam/mock_access_control_interface.go index 67324947c..6176d13e0 100644 --- a/experimental/mocks/service/iam/mock_access_control_interface.go +++ b/experimental/mocks/service/iam/mock_access_control_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_account_access_control_interface.go b/experimental/mocks/service/iam/mock_account_access_control_interface.go index e5e7dbbba..6cf335e58 100644 --- a/experimental/mocks/service/iam/mock_account_access_control_interface.go +++ b/experimental/mocks/service/iam/mock_account_access_control_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_account_access_control_proxy_interface.go b/experimental/mocks/service/iam/mock_account_access_control_proxy_interface.go index 10fd30a6f..39fe29c91 100644 --- a/experimental/mocks/service/iam/mock_account_access_control_proxy_interface.go +++ b/experimental/mocks/service/iam/mock_account_access_control_proxy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_account_groups_interface.go b/experimental/mocks/service/iam/mock_account_groups_interface.go index fc79085f2..2c4ecc22e 100644 --- a/experimental/mocks/service/iam/mock_account_groups_interface.go +++ b/experimental/mocks/service/iam/mock_account_groups_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package iam diff --git a/experimental/mocks/service/iam/mock_account_service_principals_interface.go b/experimental/mocks/service/iam/mock_account_service_principals_interface.go index 3f4eebb3b..b4aff59ea 100644 --- a/experimental/mocks/service/iam/mock_account_service_principals_interface.go +++ b/experimental/mocks/service/iam/mock_account_service_principals_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_account_users_interface.go b/experimental/mocks/service/iam/mock_account_users_interface.go index dec7252ee..7b3350953 100644 --- a/experimental/mocks/service/iam/mock_account_users_interface.go +++ b/experimental/mocks/service/iam/mock_account_users_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_current_user_interface.go b/experimental/mocks/service/iam/mock_current_user_interface.go index 6ddcd3fab..9541a8cd0 100644 --- a/experimental/mocks/service/iam/mock_current_user_interface.go +++ b/experimental/mocks/service/iam/mock_current_user_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_groups_interface.go b/experimental/mocks/service/iam/mock_groups_interface.go index b86139bce..98a27cb22 100644 --- a/experimental/mocks/service/iam/mock_groups_interface.go +++ b/experimental/mocks/service/iam/mock_groups_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package iam diff --git a/experimental/mocks/service/iam/mock_permission_migration_interface.go b/experimental/mocks/service/iam/mock_permission_migration_interface.go index c2187d519..810073f5b 100644 --- a/experimental/mocks/service/iam/mock_permission_migration_interface.go +++ b/experimental/mocks/service/iam/mock_permission_migration_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_permissions_interface.go b/experimental/mocks/service/iam/mock_permissions_interface.go index 8b461769f..1ebb04ec8 100644 --- a/experimental/mocks/service/iam/mock_permissions_interface.go +++ b/experimental/mocks/service/iam/mock_permissions_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_service_principals_interface.go b/experimental/mocks/service/iam/mock_service_principals_interface.go index bba9feb79..01d84be99 100644 --- a/experimental/mocks/service/iam/mock_service_principals_interface.go +++ b/experimental/mocks/service/iam/mock_service_principals_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/iam/mock_users_interface.go b/experimental/mocks/service/iam/mock_users_interface.go index 9319fdade..17cedb4b4 100644 --- a/experimental/mocks/service/iam/mock_users_interface.go +++ b/experimental/mocks/service/iam/mock_users_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package iam diff --git a/experimental/mocks/service/iam/mock_workspace_assignment_interface.go b/experimental/mocks/service/iam/mock_workspace_assignment_interface.go index 7625feeab..ef27098db 100644 --- a/experimental/mocks/service/iam/mock_workspace_assignment_interface.go +++ b/experimental/mocks/service/iam/mock_workspace_assignment_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package iam diff --git a/experimental/mocks/service/jobs/mock_jobs_interface.go b/experimental/mocks/service/jobs/mock_jobs_interface.go index c00b20329..8282ee1bd 100644 --- a/experimental/mocks/service/jobs/mock_jobs_interface.go +++ b/experimental/mocks/service/jobs/mock_jobs_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package jobs diff --git a/experimental/mocks/service/jobs/mock_policy_compliance_for_jobs_interface.go b/experimental/mocks/service/jobs/mock_policy_compliance_for_jobs_interface.go index ab681d1ba..c61e6ceb8 100644 --- a/experimental/mocks/service/jobs/mock_policy_compliance_for_jobs_interface.go +++ b/experimental/mocks/service/jobs/mock_policy_compliance_for_jobs_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package jobs diff --git a/experimental/mocks/service/marketplace/mock_consumer_fulfillments_interface.go b/experimental/mocks/service/marketplace/mock_consumer_fulfillments_interface.go index 001d37a17..b06a7f7c8 100644 --- a/experimental/mocks/service/marketplace/mock_consumer_fulfillments_interface.go +++ b/experimental/mocks/service/marketplace/mock_consumer_fulfillments_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package marketplace diff --git a/experimental/mocks/service/marketplace/mock_consumer_installations_interface.go b/experimental/mocks/service/marketplace/mock_consumer_installations_interface.go index 7bc4dcde2..f93d5ffac 100644 --- a/experimental/mocks/service/marketplace/mock_consumer_installations_interface.go +++ b/experimental/mocks/service/marketplace/mock_consumer_installations_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_consumer_listings_interface.go b/experimental/mocks/service/marketplace/mock_consumer_listings_interface.go index aceb32726..cfdb1c14e 100644 --- a/experimental/mocks/service/marketplace/mock_consumer_listings_interface.go +++ b/experimental/mocks/service/marketplace/mock_consumer_listings_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_consumer_personalization_requests_interface.go b/experimental/mocks/service/marketplace/mock_consumer_personalization_requests_interface.go index dddb0cb00..0ce620999 100644 --- a/experimental/mocks/service/marketplace/mock_consumer_personalization_requests_interface.go +++ b/experimental/mocks/service/marketplace/mock_consumer_personalization_requests_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package marketplace diff --git a/experimental/mocks/service/marketplace/mock_consumer_providers_interface.go b/experimental/mocks/service/marketplace/mock_consumer_providers_interface.go index ecb1d5b07..1c2b8c46c 100644 --- a/experimental/mocks/service/marketplace/mock_consumer_providers_interface.go +++ b/experimental/mocks/service/marketplace/mock_consumer_providers_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_provider_exchange_filters_interface.go b/experimental/mocks/service/marketplace/mock_provider_exchange_filters_interface.go index 1e7826be3..4a6b9fff3 100644 --- a/experimental/mocks/service/marketplace/mock_provider_exchange_filters_interface.go +++ b/experimental/mocks/service/marketplace/mock_provider_exchange_filters_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_provider_exchanges_interface.go b/experimental/mocks/service/marketplace/mock_provider_exchanges_interface.go index 4c03760ad..c4e7eeedb 100644 --- a/experimental/mocks/service/marketplace/mock_provider_exchanges_interface.go +++ b/experimental/mocks/service/marketplace/mock_provider_exchanges_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_provider_files_interface.go b/experimental/mocks/service/marketplace/mock_provider_files_interface.go index 4c05d72af..93948d39d 100644 --- a/experimental/mocks/service/marketplace/mock_provider_files_interface.go +++ b/experimental/mocks/service/marketplace/mock_provider_files_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. 
+// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_provider_listings_interface.go b/experimental/mocks/service/marketplace/mock_provider_listings_interface.go index b1c0a55bc..d14fbba3a 100644 --- a/experimental/mocks/service/marketplace/mock_provider_listings_interface.go +++ b/experimental/mocks/service/marketplace/mock_provider_listings_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_provider_personalization_requests_interface.go b/experimental/mocks/service/marketplace/mock_provider_personalization_requests_interface.go index 0792ffc86..a7bc22852 100644 --- a/experimental/mocks/service/marketplace/mock_provider_personalization_requests_interface.go +++ b/experimental/mocks/service/marketplace/mock_provider_personalization_requests_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/marketplace/mock_provider_provider_analytics_dashboards_interface.go b/experimental/mocks/service/marketplace/mock_provider_provider_analytics_dashboards_interface.go index 3bee4950a..58bf073c4 100644 --- a/experimental/mocks/service/marketplace/mock_provider_provider_analytics_dashboards_interface.go +++ b/experimental/mocks/service/marketplace/mock_provider_provider_analytics_dashboards_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package marketplace diff --git a/experimental/mocks/service/marketplace/mock_provider_providers_interface.go b/experimental/mocks/service/marketplace/mock_provider_providers_interface.go index bbab1a55e..73f896d28 100644 --- a/experimental/mocks/service/marketplace/mock_provider_providers_interface.go +++ b/experimental/mocks/service/marketplace/mock_provider_providers_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package marketplace diff --git a/experimental/mocks/service/ml/mock_experiments_interface.go b/experimental/mocks/service/ml/mock_experiments_interface.go index e7cc6155a..e13c8f47f 100644 --- a/experimental/mocks/service/ml/mock_experiments_interface.go +++ b/experimental/mocks/service/ml/mock_experiments_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package ml diff --git a/experimental/mocks/service/ml/mock_forecasting_interface.go b/experimental/mocks/service/ml/mock_forecasting_interface.go new file mode 100644 index 000000000..3f96a9ddb --- /dev/null +++ b/experimental/mocks/service/ml/mock_forecasting_interface.go @@ -0,0 +1,353 @@ +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package ml + +import ( + context "context" + + ml "github.com/databricks/databricks-sdk-go/service/ml" + mock "github.com/stretchr/testify/mock" + + retries "github.com/databricks/databricks-sdk-go/retries" + + time "time" +) + +// MockForecastingInterface is an autogenerated mock type for the ForecastingInterface type +type MockForecastingInterface struct { + mock.Mock +} + +type MockForecastingInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockForecastingInterface) EXPECT() *MockForecastingInterface_Expecter { + return &MockForecastingInterface_Expecter{mock: &_m.Mock} +} + +// CreateExperiment provides a mock function with given fields: ctx, createForecastingExperimentRequest +func (_m *MockForecastingInterface) CreateExperiment(ctx context.Context, createForecastingExperimentRequest ml.CreateForecastingExperimentRequest) (*ml.WaitGetExperimentForecastingSucceeded[ml.CreateForecastingExperimentResponse], error) { + ret := _m.Called(ctx, createForecastingExperimentRequest) + + if len(ret) == 0 { + panic("no return value specified for CreateExperiment") + } + + var r0 *ml.WaitGetExperimentForecastingSucceeded[ml.CreateForecastingExperimentResponse] + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ml.CreateForecastingExperimentRequest) (*ml.WaitGetExperimentForecastingSucceeded[ml.CreateForecastingExperimentResponse], error)); ok { + return rf(ctx, createForecastingExperimentRequest) + } + if rf, ok := ret.Get(0).(func(context.Context, ml.CreateForecastingExperimentRequest) *ml.WaitGetExperimentForecastingSucceeded[ml.CreateForecastingExperimentResponse]); ok { + r0 = rf(ctx, createForecastingExperimentRequest) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.WaitGetExperimentForecastingSucceeded[ml.CreateForecastingExperimentResponse]) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ml.CreateForecastingExperimentRequest) error); ok { + r1 = rf(ctx, createForecastingExperimentRequest) + } else { + r1 = 
ret.Error(1) + } + + return r0, r1 +} + +// MockForecastingInterface_CreateExperiment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateExperiment' +type MockForecastingInterface_CreateExperiment_Call struct { + *mock.Call +} + +// CreateExperiment is a helper method to define mock.On call +// - ctx context.Context +// - createForecastingExperimentRequest ml.CreateForecastingExperimentRequest +func (_e *MockForecastingInterface_Expecter) CreateExperiment(ctx interface{}, createForecastingExperimentRequest interface{}) *MockForecastingInterface_CreateExperiment_Call { + return &MockForecastingInterface_CreateExperiment_Call{Call: _e.mock.On("CreateExperiment", ctx, createForecastingExperimentRequest)} +} + +func (_c *MockForecastingInterface_CreateExperiment_Call) Run(run func(ctx context.Context, createForecastingExperimentRequest ml.CreateForecastingExperimentRequest)) *MockForecastingInterface_CreateExperiment_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ml.CreateForecastingExperimentRequest)) + }) + return _c +} + +func (_c *MockForecastingInterface_CreateExperiment_Call) Return(_a0 *ml.WaitGetExperimentForecastingSucceeded[ml.CreateForecastingExperimentResponse], _a1 error) *MockForecastingInterface_CreateExperiment_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockForecastingInterface_CreateExperiment_Call) RunAndReturn(run func(context.Context, ml.CreateForecastingExperimentRequest) (*ml.WaitGetExperimentForecastingSucceeded[ml.CreateForecastingExperimentResponse], error)) *MockForecastingInterface_CreateExperiment_Call { + _c.Call.Return(run) + return _c +} + +// CreateExperimentAndWait provides a mock function with given fields: ctx, createForecastingExperimentRequest, options +func (_m *MockForecastingInterface) CreateExperimentAndWait(ctx context.Context, createForecastingExperimentRequest ml.CreateForecastingExperimentRequest, options 
...retries.Option[ml.ForecastingExperiment]) (*ml.ForecastingExperiment, error) { + _va := make([]interface{}, len(options)) + for _i := range options { + _va[_i] = options[_i] + } + var _ca []interface{} + _ca = append(_ca, ctx, createForecastingExperimentRequest) + _ca = append(_ca, _va...) + ret := _m.Called(_ca...) + + if len(ret) == 0 { + panic("no return value specified for CreateExperimentAndWait") + } + + var r0 *ml.ForecastingExperiment + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ml.CreateForecastingExperimentRequest, ...retries.Option[ml.ForecastingExperiment]) (*ml.ForecastingExperiment, error)); ok { + return rf(ctx, createForecastingExperimentRequest, options...) + } + if rf, ok := ret.Get(0).(func(context.Context, ml.CreateForecastingExperimentRequest, ...retries.Option[ml.ForecastingExperiment]) *ml.ForecastingExperiment); ok { + r0 = rf(ctx, createForecastingExperimentRequest, options...) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.ForecastingExperiment) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ml.CreateForecastingExperimentRequest, ...retries.Option[ml.ForecastingExperiment]) error); ok { + r1 = rf(ctx, createForecastingExperimentRequest, options...) 
+ } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockForecastingInterface_CreateExperimentAndWait_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateExperimentAndWait' +type MockForecastingInterface_CreateExperimentAndWait_Call struct { + *mock.Call +} + +// CreateExperimentAndWait is a helper method to define mock.On call +// - ctx context.Context +// - createForecastingExperimentRequest ml.CreateForecastingExperimentRequest +// - options ...retries.Option[ml.ForecastingExperiment] +func (_e *MockForecastingInterface_Expecter) CreateExperimentAndWait(ctx interface{}, createForecastingExperimentRequest interface{}, options ...interface{}) *MockForecastingInterface_CreateExperimentAndWait_Call { + return &MockForecastingInterface_CreateExperimentAndWait_Call{Call: _e.mock.On("CreateExperimentAndWait", + append([]interface{}{ctx, createForecastingExperimentRequest}, options...)...)} +} + +func (_c *MockForecastingInterface_CreateExperimentAndWait_Call) Run(run func(ctx context.Context, createForecastingExperimentRequest ml.CreateForecastingExperimentRequest, options ...retries.Option[ml.ForecastingExperiment])) *MockForecastingInterface_CreateExperimentAndWait_Call { + _c.Call.Run(func(args mock.Arguments) { + variadicArgs := make([]retries.Option[ml.ForecastingExperiment], len(args)-2) + for i, a := range args[2:] { + if a != nil { + variadicArgs[i] = a.(retries.Option[ml.ForecastingExperiment]) + } + } + run(args[0].(context.Context), args[1].(ml.CreateForecastingExperimentRequest), variadicArgs...) 
+ }) + return _c +} + +func (_c *MockForecastingInterface_CreateExperimentAndWait_Call) Return(_a0 *ml.ForecastingExperiment, _a1 error) *MockForecastingInterface_CreateExperimentAndWait_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockForecastingInterface_CreateExperimentAndWait_Call) RunAndReturn(run func(context.Context, ml.CreateForecastingExperimentRequest, ...retries.Option[ml.ForecastingExperiment]) (*ml.ForecastingExperiment, error)) *MockForecastingInterface_CreateExperimentAndWait_Call { + _c.Call.Return(run) + return _c +} + +// GetExperiment provides a mock function with given fields: ctx, request +func (_m *MockForecastingInterface) GetExperiment(ctx context.Context, request ml.GetForecastingExperimentRequest) (*ml.ForecastingExperiment, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetExperiment") + } + + var r0 *ml.ForecastingExperiment + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ml.GetForecastingExperimentRequest) (*ml.ForecastingExperiment, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, ml.GetForecastingExperimentRequest) *ml.ForecastingExperiment); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.ForecastingExperiment) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ml.GetForecastingExperimentRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockForecastingInterface_GetExperiment_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetExperiment' +type MockForecastingInterface_GetExperiment_Call struct { + *mock.Call +} + +// GetExperiment is a helper method to define mock.On call +// - ctx context.Context +// - request ml.GetForecastingExperimentRequest +func (_e *MockForecastingInterface_Expecter) GetExperiment(ctx interface{}, request interface{}) 
*MockForecastingInterface_GetExperiment_Call { + return &MockForecastingInterface_GetExperiment_Call{Call: _e.mock.On("GetExperiment", ctx, request)} +} + +func (_c *MockForecastingInterface_GetExperiment_Call) Run(run func(ctx context.Context, request ml.GetForecastingExperimentRequest)) *MockForecastingInterface_GetExperiment_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ml.GetForecastingExperimentRequest)) + }) + return _c +} + +func (_c *MockForecastingInterface_GetExperiment_Call) Return(_a0 *ml.ForecastingExperiment, _a1 error) *MockForecastingInterface_GetExperiment_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockForecastingInterface_GetExperiment_Call) RunAndReturn(run func(context.Context, ml.GetForecastingExperimentRequest) (*ml.ForecastingExperiment, error)) *MockForecastingInterface_GetExperiment_Call { + _c.Call.Return(run) + return _c +} + +// GetExperimentByExperimentId provides a mock function with given fields: ctx, experimentId +func (_m *MockForecastingInterface) GetExperimentByExperimentId(ctx context.Context, experimentId string) (*ml.ForecastingExperiment, error) { + ret := _m.Called(ctx, experimentId) + + if len(ret) == 0 { + panic("no return value specified for GetExperimentByExperimentId") + } + + var r0 *ml.ForecastingExperiment + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*ml.ForecastingExperiment, error)); ok { + return rf(ctx, experimentId) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *ml.ForecastingExperiment); ok { + r0 = rf(ctx, experimentId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.ForecastingExperiment) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, experimentId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockForecastingInterface_GetExperimentByExperimentId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for 
method 'GetExperimentByExperimentId' +type MockForecastingInterface_GetExperimentByExperimentId_Call struct { + *mock.Call +} + +// GetExperimentByExperimentId is a helper method to define mock.On call +// - ctx context.Context +// - experimentId string +func (_e *MockForecastingInterface_Expecter) GetExperimentByExperimentId(ctx interface{}, experimentId interface{}) *MockForecastingInterface_GetExperimentByExperimentId_Call { + return &MockForecastingInterface_GetExperimentByExperimentId_Call{Call: _e.mock.On("GetExperimentByExperimentId", ctx, experimentId)} +} + +func (_c *MockForecastingInterface_GetExperimentByExperimentId_Call) Run(run func(ctx context.Context, experimentId string)) *MockForecastingInterface_GetExperimentByExperimentId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockForecastingInterface_GetExperimentByExperimentId_Call) Return(_a0 *ml.ForecastingExperiment, _a1 error) *MockForecastingInterface_GetExperimentByExperimentId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockForecastingInterface_GetExperimentByExperimentId_Call) RunAndReturn(run func(context.Context, string) (*ml.ForecastingExperiment, error)) *MockForecastingInterface_GetExperimentByExperimentId_Call { + _c.Call.Return(run) + return _c +} + +// WaitGetExperimentForecastingSucceeded provides a mock function with given fields: ctx, experimentId, timeout, callback +func (_m *MockForecastingInterface) WaitGetExperimentForecastingSucceeded(ctx context.Context, experimentId string, timeout time.Duration, callback func(*ml.ForecastingExperiment)) (*ml.ForecastingExperiment, error) { + ret := _m.Called(ctx, experimentId, timeout, callback) + + if len(ret) == 0 { + panic("no return value specified for WaitGetExperimentForecastingSucceeded") + } + + var r0 *ml.ForecastingExperiment + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, 
func(*ml.ForecastingExperiment)) (*ml.ForecastingExperiment, error)); ok { + return rf(ctx, experimentId, timeout, callback) + } + if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, func(*ml.ForecastingExperiment)) *ml.ForecastingExperiment); ok { + r0 = rf(ctx, experimentId, timeout, callback) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.ForecastingExperiment) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, time.Duration, func(*ml.ForecastingExperiment)) error); ok { + r1 = rf(ctx, experimentId, timeout, callback) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'WaitGetExperimentForecastingSucceeded' +type MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call struct { + *mock.Call +} + +// WaitGetExperimentForecastingSucceeded is a helper method to define mock.On call +// - ctx context.Context +// - experimentId string +// - timeout time.Duration +// - callback func(*ml.ForecastingExperiment) +func (_e *MockForecastingInterface_Expecter) WaitGetExperimentForecastingSucceeded(ctx interface{}, experimentId interface{}, timeout interface{}, callback interface{}) *MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call { + return &MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call{Call: _e.mock.On("WaitGetExperimentForecastingSucceeded", ctx, experimentId, timeout, callback)} +} + +func (_c *MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call) Run(run func(ctx context.Context, experimentId string, timeout time.Duration, callback func(*ml.ForecastingExperiment))) *MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(time.Duration), args[3].(func(*ml.ForecastingExperiment))) + }) 
+ return _c +} + +func (_c *MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call) Return(_a0 *ml.ForecastingExperiment, _a1 error) *MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call) RunAndReturn(run func(context.Context, string, time.Duration, func(*ml.ForecastingExperiment)) (*ml.ForecastingExperiment, error)) *MockForecastingInterface_WaitGetExperimentForecastingSucceeded_Call { + _c.Call.Return(run) + return _c +} + +// NewMockForecastingInterface creates a new instance of MockForecastingInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockForecastingInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockForecastingInterface { + mock := &MockForecastingInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/ml/mock_model_registry_interface.go b/experimental/mocks/service/ml/mock_model_registry_interface.go index c4196bccd..7db047dcc 100644 --- a/experimental/mocks/service/ml/mock_model_registry_interface.go +++ b/experimental/mocks/service/ml/mock_model_registry_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package ml diff --git a/experimental/mocks/service/oauth2/mock_account_federation_policy_interface.go b/experimental/mocks/service/oauth2/mock_account_federation_policy_interface.go index 13fe9d482..8ae80d407 100644 --- a/experimental/mocks/service/oauth2/mock_account_federation_policy_interface.go +++ b/experimental/mocks/service/oauth2/mock_account_federation_policy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. 
DO NOT EDIT. package oauth2 diff --git a/experimental/mocks/service/oauth2/mock_custom_app_integration_interface.go b/experimental/mocks/service/oauth2/mock_custom_app_integration_interface.go index 714d00854..3546f76ae 100644 --- a/experimental/mocks/service/oauth2/mock_custom_app_integration_interface.go +++ b/experimental/mocks/service/oauth2/mock_custom_app_integration_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package oauth2 diff --git a/experimental/mocks/service/oauth2/mock_o_auth_published_apps_interface.go b/experimental/mocks/service/oauth2/mock_o_auth_published_apps_interface.go index 589dfc670..730873f6d 100644 --- a/experimental/mocks/service/oauth2/mock_o_auth_published_apps_interface.go +++ b/experimental/mocks/service/oauth2/mock_o_auth_published_apps_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package oauth2 diff --git a/experimental/mocks/service/oauth2/mock_published_app_integration_interface.go b/experimental/mocks/service/oauth2/mock_published_app_integration_interface.go index b9641341a..8cc1c843b 100644 --- a/experimental/mocks/service/oauth2/mock_published_app_integration_interface.go +++ b/experimental/mocks/service/oauth2/mock_published_app_integration_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package oauth2 diff --git a/experimental/mocks/service/oauth2/mock_service_principal_federation_policy_interface.go b/experimental/mocks/service/oauth2/mock_service_principal_federation_policy_interface.go index 31ab67310..85f7cf861 100644 --- a/experimental/mocks/service/oauth2/mock_service_principal_federation_policy_interface.go +++ b/experimental/mocks/service/oauth2/mock_service_principal_federation_policy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. 
DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package oauth2 diff --git a/experimental/mocks/service/oauth2/mock_service_principal_secrets_interface.go b/experimental/mocks/service/oauth2/mock_service_principal_secrets_interface.go index 06f9d0828..4ce1fd8fb 100644 --- a/experimental/mocks/service/oauth2/mock_service_principal_secrets_interface.go +++ b/experimental/mocks/service/oauth2/mock_service_principal_secrets_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package oauth2 diff --git a/experimental/mocks/service/pipelines/mock_pipelines_interface.go b/experimental/mocks/service/pipelines/mock_pipelines_interface.go index bc932ef31..db41cbca0 100644 --- a/experimental/mocks/service/pipelines/mock_pipelines_interface.go +++ b/experimental/mocks/service/pipelines/mock_pipelines_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package pipelines diff --git a/experimental/mocks/service/provisioning/mock_credentials_interface.go b/experimental/mocks/service/provisioning/mock_credentials_interface.go index d02f18583..9d9f9b887 100644 --- a/experimental/mocks/service/provisioning/mock_credentials_interface.go +++ b/experimental/mocks/service/provisioning/mock_credentials_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package provisioning diff --git a/experimental/mocks/service/provisioning/mock_encryption_keys_interface.go b/experimental/mocks/service/provisioning/mock_encryption_keys_interface.go index 78836e9f2..bc8ef4559 100644 --- a/experimental/mocks/service/provisioning/mock_encryption_keys_interface.go +++ b/experimental/mocks/service/provisioning/mock_encryption_keys_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. 
DO NOT EDIT. package provisioning diff --git a/experimental/mocks/service/provisioning/mock_networks_interface.go b/experimental/mocks/service/provisioning/mock_networks_interface.go index 489e9f4f1..a47e59609 100644 --- a/experimental/mocks/service/provisioning/mock_networks_interface.go +++ b/experimental/mocks/service/provisioning/mock_networks_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package provisioning diff --git a/experimental/mocks/service/provisioning/mock_private_access_interface.go b/experimental/mocks/service/provisioning/mock_private_access_interface.go index f078209c0..ce479f6df 100644 --- a/experimental/mocks/service/provisioning/mock_private_access_interface.go +++ b/experimental/mocks/service/provisioning/mock_private_access_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package provisioning diff --git a/experimental/mocks/service/provisioning/mock_storage_interface.go b/experimental/mocks/service/provisioning/mock_storage_interface.go index 007d2cbae..03773cd81 100644 --- a/experimental/mocks/service/provisioning/mock_storage_interface.go +++ b/experimental/mocks/service/provisioning/mock_storage_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package provisioning diff --git a/experimental/mocks/service/provisioning/mock_vpc_endpoints_interface.go b/experimental/mocks/service/provisioning/mock_vpc_endpoints_interface.go index 61426495b..df2cb6bfe 100644 --- a/experimental/mocks/service/provisioning/mock_vpc_endpoints_interface.go +++ b/experimental/mocks/service/provisioning/mock_vpc_endpoints_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package provisioning diff --git a/experimental/mocks/service/provisioning/mock_workspaces_interface.go b/experimental/mocks/service/provisioning/mock_workspaces_interface.go index 230d2b9f3..01d3786cb 100644 --- a/experimental/mocks/service/provisioning/mock_workspaces_interface.go +++ b/experimental/mocks/service/provisioning/mock_workspaces_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package provisioning diff --git a/experimental/mocks/service/serving/mock_serving_endpoints_data_plane_interface.go b/experimental/mocks/service/serving/mock_serving_endpoints_data_plane_interface.go index 28332a309..bd372ec84 100644 --- a/experimental/mocks/service/serving/mock_serving_endpoints_data_plane_interface.go +++ b/experimental/mocks/service/serving/mock_serving_endpoints_data_plane_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package serving diff --git a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go index a4cdcf157..47c8bde06 100644 --- a/experimental/mocks/service/serving/mock_serving_endpoints_interface.go +++ b/experimental/mocks/service/serving/mock_serving_endpoints_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package serving diff --git a/experimental/mocks/service/settings/mock_account_ip_access_lists_interface.go b/experimental/mocks/service/settings/mock_account_ip_access_lists_interface.go index d0a7599ba..950a5d44a 100644 --- a/experimental/mocks/service/settings/mock_account_ip_access_lists_interface.go +++ b/experimental/mocks/service/settings/mock_account_ip_access_lists_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package settings diff --git a/experimental/mocks/service/settings/mock_account_settings_interface.go b/experimental/mocks/service/settings/mock_account_settings_interface.go index 82c9a3546..0939459c2 100644 --- a/experimental/mocks/service/settings/mock_account_settings_interface.go +++ b/experimental/mocks/service/settings/mock_account_settings_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings @@ -20,7 +20,7 @@ func (_m *MockAccountSettingsInterface) EXPECT() *MockAccountSettingsInterface_E return &MockAccountSettingsInterface_Expecter{mock: &_m.Mock} } -// CspEnablementAccount provides a mock function with given fields: +// CspEnablementAccount provides a mock function with no fields func (_m *MockAccountSettingsInterface) CspEnablementAccount() settings.CspEnablementAccountInterface { ret := _m.Called() @@ -67,7 +67,7 @@ func (_c *MockAccountSettingsInterface_CspEnablementAccount_Call) RunAndReturn(r return _c } -// DisableLegacyFeatures provides a mock function with given fields: +// DisableLegacyFeatures provides a mock function with no fields func (_m *MockAccountSettingsInterface) DisableLegacyFeatures() settings.DisableLegacyFeaturesInterface { ret := _m.Called() @@ -114,7 +114,7 @@ func (_c *MockAccountSettingsInterface_DisableLegacyFeatures_Call) RunAndReturn( return _c } -// EnableIpAccessLists provides a mock function with given fields: +// EnableIpAccessLists provides a mock function with no fields func (_m *MockAccountSettingsInterface) EnableIpAccessLists() settings.EnableIpAccessListsInterface { ret := _m.Called() @@ -161,7 +161,7 @@ func (_c *MockAccountSettingsInterface_EnableIpAccessLists_Call) RunAndReturn(ru return _c } -// EsmEnablementAccount provides a mock function with given fields: +// EsmEnablementAccount provides a mock function with no fields func (_m *MockAccountSettingsInterface) EsmEnablementAccount() 
settings.EsmEnablementAccountInterface { ret := _m.Called() @@ -208,7 +208,7 @@ func (_c *MockAccountSettingsInterface_EsmEnablementAccount_Call) RunAndReturn(r return _c } -// PersonalCompute provides a mock function with given fields: +// PersonalCompute provides a mock function with no fields func (_m *MockAccountSettingsInterface) PersonalCompute() settings.PersonalComputeInterface { ret := _m.Called() diff --git a/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_access_policy_interface.go b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_access_policy_interface.go index 1c7bdfb20..051f8b175 100644 --- a/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_access_policy_interface.go +++ b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_access_policy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_approved_domains_interface.go b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_approved_domains_interface.go index 059be7623..437d8030a 100644 --- a/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_approved_domains_interface.go +++ b/experimental/mocks/service/settings/mock_aibi_dashboard_embedding_approved_domains_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_automatic_cluster_update_interface.go b/experimental/mocks/service/settings/mock_automatic_cluster_update_interface.go index 45d44fdaa..b9305eb26 100644 --- a/experimental/mocks/service/settings/mock_automatic_cluster_update_interface.go +++ b/experimental/mocks/service/settings/mock_automatic_cluster_update_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. 
DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_compliance_security_profile_interface.go b/experimental/mocks/service/settings/mock_compliance_security_profile_interface.go index e16518dd4..241faa448 100644 --- a/experimental/mocks/service/settings/mock_compliance_security_profile_interface.go +++ b/experimental/mocks/service/settings/mock_compliance_security_profile_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_credentials_manager_interface.go b/experimental/mocks/service/settings/mock_credentials_manager_interface.go index c23f9f2dd..8c66e0672 100644 --- a/experimental/mocks/service/settings/mock_credentials_manager_interface.go +++ b/experimental/mocks/service/settings/mock_credentials_manager_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_csp_enablement_account_interface.go b/experimental/mocks/service/settings/mock_csp_enablement_account_interface.go index 97bfb1918..fc056ce8e 100644 --- a/experimental/mocks/service/settings/mock_csp_enablement_account_interface.go +++ b/experimental/mocks/service/settings/mock_csp_enablement_account_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package settings diff --git a/experimental/mocks/service/settings/mock_default_namespace_interface.go b/experimental/mocks/service/settings/mock_default_namespace_interface.go index 9a4cb866f..7cbbb02f3 100644 --- a/experimental/mocks/service/settings/mock_default_namespace_interface.go +++ b/experimental/mocks/service/settings/mock_default_namespace_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_disable_legacy_access_interface.go b/experimental/mocks/service/settings/mock_disable_legacy_access_interface.go index e3ccf3098..d66f811eb 100644 --- a/experimental/mocks/service/settings/mock_disable_legacy_access_interface.go +++ b/experimental/mocks/service/settings/mock_disable_legacy_access_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go b/experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go index 2b426907f..c3743250d 100644 --- a/experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go +++ b/experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_disable_legacy_features_interface.go b/experimental/mocks/service/settings/mock_disable_legacy_features_interface.go index d67723519..ce3329c39 100644 --- a/experimental/mocks/service/settings/mock_disable_legacy_features_interface.go +++ b/experimental/mocks/service/settings/mock_disable_legacy_features_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package settings diff --git a/experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go b/experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go index 54868cbe8..5cef0dfb5 100644 --- a/experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go +++ b/experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go b/experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go index 13f2a64c4..480155b87 100644 --- a/experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go +++ b/experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_esm_enablement_account_interface.go b/experimental/mocks/service/settings/mock_esm_enablement_account_interface.go index 05385abf0..4952a23f9 100644 --- a/experimental/mocks/service/settings/mock_esm_enablement_account_interface.go +++ b/experimental/mocks/service/settings/mock_esm_enablement_account_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_ip_access_lists_interface.go b/experimental/mocks/service/settings/mock_ip_access_lists_interface.go index 8f69bc5c2..0cf73dd9f 100644 --- a/experimental/mocks/service/settings/mock_ip_access_lists_interface.go +++ b/experimental/mocks/service/settings/mock_ip_access_lists_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. 
+// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_network_connectivity_interface.go b/experimental/mocks/service/settings/mock_network_connectivity_interface.go index c0c809d61..d0d65089d 100644 --- a/experimental/mocks/service/settings/mock_network_connectivity_interface.go +++ b/experimental/mocks/service/settings/mock_network_connectivity_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_notification_destinations_interface.go b/experimental/mocks/service/settings/mock_notification_destinations_interface.go index 1a7ba71fa..c8786c561 100644 --- a/experimental/mocks/service/settings/mock_notification_destinations_interface.go +++ b/experimental/mocks/service/settings/mock_notification_destinations_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_personal_compute_interface.go b/experimental/mocks/service/settings/mock_personal_compute_interface.go index cb747b324..ff3f71a7e 100644 --- a/experimental/mocks/service/settings/mock_personal_compute_interface.go +++ b/experimental/mocks/service/settings/mock_personal_compute_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package settings diff --git a/experimental/mocks/service/settings/mock_restrict_workspace_admins_interface.go b/experimental/mocks/service/settings/mock_restrict_workspace_admins_interface.go index 5dd1373a0..c5a16bda2 100644 --- a/experimental/mocks/service/settings/mock_restrict_workspace_admins_interface.go +++ b/experimental/mocks/service/settings/mock_restrict_workspace_admins_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_settings_interface.go b/experimental/mocks/service/settings/mock_settings_interface.go index 0e4a6a17a..73f937d27 100644 --- a/experimental/mocks/service/settings/mock_settings_interface.go +++ b/experimental/mocks/service/settings/mock_settings_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings @@ -20,7 +20,7 @@ func (_m *MockSettingsInterface) EXPECT() *MockSettingsInterface_Expecter { return &MockSettingsInterface_Expecter{mock: &_m.Mock} } -// AibiDashboardEmbeddingAccessPolicy provides a mock function with given fields: +// AibiDashboardEmbeddingAccessPolicy provides a mock function with no fields func (_m *MockSettingsInterface) AibiDashboardEmbeddingAccessPolicy() settings.AibiDashboardEmbeddingAccessPolicyInterface { ret := _m.Called() @@ -67,7 +67,7 @@ func (_c *MockSettingsInterface_AibiDashboardEmbeddingAccessPolicy_Call) RunAndR return _c } -// AibiDashboardEmbeddingApprovedDomains provides a mock function with given fields: +// AibiDashboardEmbeddingApprovedDomains provides a mock function with no fields func (_m *MockSettingsInterface) AibiDashboardEmbeddingApprovedDomains() settings.AibiDashboardEmbeddingApprovedDomainsInterface { ret := _m.Called() @@ -114,7 +114,7 @@ func (_c *MockSettingsInterface_AibiDashboardEmbeddingApprovedDomains_Call) RunA return _c } -// 
AutomaticClusterUpdate provides a mock function with given fields: +// AutomaticClusterUpdate provides a mock function with no fields func (_m *MockSettingsInterface) AutomaticClusterUpdate() settings.AutomaticClusterUpdateInterface { ret := _m.Called() @@ -161,7 +161,7 @@ func (_c *MockSettingsInterface_AutomaticClusterUpdate_Call) RunAndReturn(run fu return _c } -// ComplianceSecurityProfile provides a mock function with given fields: +// ComplianceSecurityProfile provides a mock function with no fields func (_m *MockSettingsInterface) ComplianceSecurityProfile() settings.ComplianceSecurityProfileInterface { ret := _m.Called() @@ -208,7 +208,7 @@ func (_c *MockSettingsInterface_ComplianceSecurityProfile_Call) RunAndReturn(run return _c } -// DefaultNamespace provides a mock function with given fields: +// DefaultNamespace provides a mock function with no fields func (_m *MockSettingsInterface) DefaultNamespace() settings.DefaultNamespaceInterface { ret := _m.Called() @@ -255,7 +255,7 @@ func (_c *MockSettingsInterface_DefaultNamespace_Call) RunAndReturn(run func() s return _c } -// DisableLegacyAccess provides a mock function with given fields: +// DisableLegacyAccess provides a mock function with no fields func (_m *MockSettingsInterface) DisableLegacyAccess() settings.DisableLegacyAccessInterface { ret := _m.Called() @@ -302,7 +302,7 @@ func (_c *MockSettingsInterface_DisableLegacyAccess_Call) RunAndReturn(run func( return _c } -// DisableLegacyDbfs provides a mock function with given fields: +// DisableLegacyDbfs provides a mock function with no fields func (_m *MockSettingsInterface) DisableLegacyDbfs() settings.DisableLegacyDbfsInterface { ret := _m.Called() @@ -349,7 +349,7 @@ func (_c *MockSettingsInterface_DisableLegacyDbfs_Call) RunAndReturn(run func() return _c } -// EnhancedSecurityMonitoring provides a mock function with given fields: +// EnhancedSecurityMonitoring provides a mock function with no fields func (_m *MockSettingsInterface) 
EnhancedSecurityMonitoring() settings.EnhancedSecurityMonitoringInterface { ret := _m.Called() @@ -396,7 +396,7 @@ func (_c *MockSettingsInterface_EnhancedSecurityMonitoring_Call) RunAndReturn(ru return _c } -// RestrictWorkspaceAdmins provides a mock function with given fields: +// RestrictWorkspaceAdmins provides a mock function with no fields func (_m *MockSettingsInterface) RestrictWorkspaceAdmins() settings.RestrictWorkspaceAdminsInterface { ret := _m.Called() diff --git a/experimental/mocks/service/settings/mock_token_management_interface.go b/experimental/mocks/service/settings/mock_token_management_interface.go index 14f8737ce..c4c77ec9c 100644 --- a/experimental/mocks/service/settings/mock_token_management_interface.go +++ b/experimental/mocks/service/settings/mock_token_management_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_tokens_interface.go b/experimental/mocks/service/settings/mock_tokens_interface.go index 4b3263816..f3f52f806 100644 --- a/experimental/mocks/service/settings/mock_tokens_interface.go +++ b/experimental/mocks/service/settings/mock_tokens_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package settings diff --git a/experimental/mocks/service/settings/mock_workspace_conf_interface.go b/experimental/mocks/service/settings/mock_workspace_conf_interface.go index e7067d78f..7634cacc3 100644 --- a/experimental/mocks/service/settings/mock_workspace_conf_interface.go +++ b/experimental/mocks/service/settings/mock_workspace_conf_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package settings diff --git a/experimental/mocks/service/sharing/mock_providers_interface.go b/experimental/mocks/service/sharing/mock_providers_interface.go index 4f3cb5aca..93a221c88 100644 --- a/experimental/mocks/service/sharing/mock_providers_interface.go +++ b/experimental/mocks/service/sharing/mock_providers_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sharing diff --git a/experimental/mocks/service/sharing/mock_recipient_activation_interface.go b/experimental/mocks/service/sharing/mock_recipient_activation_interface.go index a940afe6a..fa7dcb417 100644 --- a/experimental/mocks/service/sharing/mock_recipient_activation_interface.go +++ b/experimental/mocks/service/sharing/mock_recipient_activation_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sharing diff --git a/experimental/mocks/service/sharing/mock_recipients_interface.go b/experimental/mocks/service/sharing/mock_recipients_interface.go index 9a4a4dec4..4cf8ab0e6 100644 --- a/experimental/mocks/service/sharing/mock_recipients_interface.go +++ b/experimental/mocks/service/sharing/mock_recipients_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sharing diff --git a/experimental/mocks/service/sharing/mock_shares_interface.go b/experimental/mocks/service/sharing/mock_shares_interface.go index 873133875..f66a01665 100644 --- a/experimental/mocks/service/sharing/mock_shares_interface.go +++ b/experimental/mocks/service/sharing/mock_shares_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package sharing diff --git a/experimental/mocks/service/sql/mock_alerts_interface.go b/experimental/mocks/service/sql/mock_alerts_interface.go index 20408c2fd..a7afd9497 100644 --- a/experimental/mocks/service/sql/mock_alerts_interface.go +++ b/experimental/mocks/service/sql/mock_alerts_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_alerts_legacy_interface.go b/experimental/mocks/service/sql/mock_alerts_legacy_interface.go index f990b0bc6..16a24de53 100644 --- a/experimental/mocks/service/sql/mock_alerts_legacy_interface.go +++ b/experimental/mocks/service/sql/mock_alerts_legacy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_dashboard_widgets_interface.go b/experimental/mocks/service/sql/mock_dashboard_widgets_interface.go index e0f4b74e7..160f3d3c6 100644 --- a/experimental/mocks/service/sql/mock_dashboard_widgets_interface.go +++ b/experimental/mocks/service/sql/mock_dashboard_widgets_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_dashboards_interface.go b/experimental/mocks/service/sql/mock_dashboards_interface.go index 605c03395..8771ff5f2 100644 --- a/experimental/mocks/service/sql/mock_dashboards_interface.go +++ b/experimental/mocks/service/sql/mock_dashboards_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package sql diff --git a/experimental/mocks/service/sql/mock_data_sources_interface.go b/experimental/mocks/service/sql/mock_data_sources_interface.go index fb1a708e2..963277e92 100644 --- a/experimental/mocks/service/sql/mock_data_sources_interface.go +++ b/experimental/mocks/service/sql/mock_data_sources_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_dbsql_permissions_interface.go b/experimental/mocks/service/sql/mock_dbsql_permissions_interface.go index 95cfdfc0e..689a9bf0a 100644 --- a/experimental/mocks/service/sql/mock_dbsql_permissions_interface.go +++ b/experimental/mocks/service/sql/mock_dbsql_permissions_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_queries_interface.go b/experimental/mocks/service/sql/mock_queries_interface.go index efc62901b..5dd90cc32 100644 --- a/experimental/mocks/service/sql/mock_queries_interface.go +++ b/experimental/mocks/service/sql/mock_queries_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_queries_legacy_interface.go b/experimental/mocks/service/sql/mock_queries_legacy_interface.go index e7b6e60b2..2997c23c5 100644 --- a/experimental/mocks/service/sql/mock_queries_legacy_interface.go +++ b/experimental/mocks/service/sql/mock_queries_legacy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package sql diff --git a/experimental/mocks/service/sql/mock_query_history_interface.go b/experimental/mocks/service/sql/mock_query_history_interface.go index 2091b7cfe..127c8ddfc 100644 --- a/experimental/mocks/service/sql/mock_query_history_interface.go +++ b/experimental/mocks/service/sql/mock_query_history_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_query_visualizations_interface.go b/experimental/mocks/service/sql/mock_query_visualizations_interface.go index 267920cb7..6eed1c81d 100644 --- a/experimental/mocks/service/sql/mock_query_visualizations_interface.go +++ b/experimental/mocks/service/sql/mock_query_visualizations_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_query_visualizations_legacy_interface.go b/experimental/mocks/service/sql/mock_query_visualizations_legacy_interface.go index e303d3fb8..d3b0b13b0 100644 --- a/experimental/mocks/service/sql/mock_query_visualizations_legacy_interface.go +++ b/experimental/mocks/service/sql/mock_query_visualizations_legacy_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_redash_config_interface.go b/experimental/mocks/service/sql/mock_redash_config_interface.go index e96d5957d..176da6c5f 100644 --- a/experimental/mocks/service/sql/mock_redash_config_interface.go +++ b/experimental/mocks/service/sql/mock_redash_config_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package sql diff --git a/experimental/mocks/service/sql/mock_statement_execution_interface.go b/experimental/mocks/service/sql/mock_statement_execution_interface.go index 718c6d443..546085a3a 100644 --- a/experimental/mocks/service/sql/mock_statement_execution_interface.go +++ b/experimental/mocks/service/sql/mock_statement_execution_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/sql/mock_warehouses_interface.go b/experimental/mocks/service/sql/mock_warehouses_interface.go index 7db4a09b6..be5e28a80 100644 --- a/experimental/mocks/service/sql/mock_warehouses_interface.go +++ b/experimental/mocks/service/sql/mock_warehouses_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package sql diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go index bac669a1c..50e397591 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package vectorsearch diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go index a72f967db..8fd95e648 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package vectorsearch diff --git a/experimental/mocks/service/workspace/mock_git_credentials_interface.go b/experimental/mocks/service/workspace/mock_git_credentials_interface.go index 8f8c6b85c..79a40ccd6 100644 --- a/experimental/mocks/service/workspace/mock_git_credentials_interface.go +++ b/experimental/mocks/service/workspace/mock_git_credentials_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package workspace diff --git a/experimental/mocks/service/workspace/mock_repos_interface.go b/experimental/mocks/service/workspace/mock_repos_interface.go index ef069a72e..b757b92c8 100644 --- a/experimental/mocks/service/workspace/mock_repos_interface.go +++ b/experimental/mocks/service/workspace/mock_repos_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package workspace diff --git a/experimental/mocks/service/workspace/mock_secrets_interface.go b/experimental/mocks/service/workspace/mock_secrets_interface.go index cb469e55c..37dee1d19 100644 --- a/experimental/mocks/service/workspace/mock_secrets_interface.go +++ b/experimental/mocks/service/workspace/mock_secrets_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. package workspace diff --git a/experimental/mocks/service/workspace/mock_workspace_interface.go b/experimental/mocks/service/workspace/mock_workspace_interface.go index 30b2ecf5d..89597a013 100644 --- a/experimental/mocks/service/workspace/mock_workspace_interface.go +++ b/experimental/mocks/service/workspace/mock_workspace_interface.go @@ -1,4 +1,4 @@ -// Code generated by mockery v2.43.0. DO NOT EDIT. +// Code generated by mockery v2.53.2. DO NOT EDIT. 
package workspace diff --git a/service/compute/impl.go b/service/compute/impl.go index 857e6229f..0b6a7fbf0 100755 --- a/service/compute/impl.go +++ b/service/compute/impl.go @@ -541,6 +541,7 @@ func (a *globalInitScriptsImpl) Delete(ctx context.Context, request DeleteGlobal path := fmt.Sprintf("/api/2.0/global-init-scripts/%v", request.ScriptId) queryParams := make(map[string]any) headers := make(map[string]string) + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } @@ -606,6 +607,7 @@ func (a *globalInitScriptsImpl) Update(ctx context.Context, request GlobalInitSc path := fmt.Sprintf("/api/2.0/global-init-scripts/%v", request.ScriptId) queryParams := make(map[string]any) headers := make(map[string]string) + headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err diff --git a/service/compute/model.go b/service/compute/model.go index 32881163f..561eb3fc2 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2196,6 +2196,10 @@ type DestroyContext struct { type DestroyResponse struct { } +// Describes the disks that are launched for each instance in the spark cluster. +// For example, if the cluster has 3 instances, each instance is configured to +// launch 2 disks, 100 GiB each, then Databricks will launch a total of 6 disks, +// 100 GiB each, for this cluster. type DiskSpec struct { // The number of disks launched for each instance: - This feature is only // enabled for supported node types. - Users can choose up to the limit of @@ -2241,12 +2245,18 @@ func (s DiskSpec) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Describes the disk type. type DiskType struct { + // All Azure Disk types that Databricks supports. 
See + // https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks AzureDiskVolumeType DiskTypeAzureDiskVolumeType `json:"azure_disk_volume_type,omitempty"` - + // All EBS volume types that Databricks supports. See + // https://aws.amazon.com/ebs/details/ for details. EbsVolumeType DiskTypeEbsVolumeType `json:"ebs_volume_type,omitempty"` } +// All Azure Disk types that Databricks supports. See +// https://docs.microsoft.com/en-us/azure/storage/storage-about-disks-and-vhds-linux#types-of-disks type DiskTypeAzureDiskVolumeType string const DiskTypeAzureDiskVolumeTypePremiumLrs DiskTypeAzureDiskVolumeType = `PREMIUM_LRS` @@ -2274,6 +2284,8 @@ func (f *DiskTypeAzureDiskVolumeType) Type() string { return "DiskTypeAzureDiskVolumeType" } +// All EBS volume types that Databricks supports. See +// https://aws.amazon.com/ebs/details/ for details. type DiskTypeEbsVolumeType string const DiskTypeEbsVolumeTypeGeneralPurposeSsd DiskTypeEbsVolumeType = `GENERAL_PURPOSE_SSD` @@ -2319,6 +2331,7 @@ func (s DockerBasicAuth) MarshalJSON() ([]byte, error) { } type DockerImage struct { + // Basic auth with username and password BasicAuth *DockerBasicAuth `json:"basic_auth,omitempty"` // URL of the docker image. 
Url string `json:"url,omitempty"` @@ -3152,7 +3165,7 @@ type GetInstancePool struct { // // - Currently, Databricks allows at most 45 custom tags CustomTags map[string]string `json:"custom_tags,omitempty"` - // Tags that are added by Databricks regardless of any `custom_tags`, + // Tags that are added by Databricks regardless of any ``custom_tags``, // including: // // - Vendor: Databricks @@ -3579,7 +3592,7 @@ type InstancePoolAndStats struct { // // - Currently, Databricks allows at most 45 custom tags CustomTags map[string]string `json:"custom_tags,omitempty"` - // Tags that are added by Databricks regardless of any `custom_tags`, + // Tags that are added by Databricks regardless of any ``custom_tags``, // including: // // - Vendor: Databricks @@ -3652,11 +3665,10 @@ func (s InstancePoolAndStats) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Attributes set during instance pool creation which are related to Amazon Web +// Services. type InstancePoolAwsAttributes struct { // Availability type used for the spot nodes. - // - // The default value is defined by - // InstancePoolConf.instancePoolDefaultAwsAvailability Availability InstancePoolAwsAttributesAvailability `json:"availability,omitempty"` // Calculates the bid price for AWS spot instances, as a percentage of the // corresponding instance type's on-demand price. For example, if this field @@ -3668,10 +3680,6 @@ type InstancePoolAwsAttributes struct { // instances whose bid price percentage matches this field will be // considered. Note that, for safety, we enforce this field to be no more // than 10000. - // - // The default value and documentation here should be kept consistent with - // CommonConf.defaultSpotBidPricePercent and - // CommonConf.maxSpotBidPricePercent. SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` // Identifier for the availability zone/datacenter in which the cluster // resides. This string will be of a form like "us-west-2a". 
The provided @@ -3694,10 +3702,8 @@ func (s InstancePoolAwsAttributes) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Availability type used for the spot nodes. -// -// The default value is defined by -// InstancePoolConf.instancePoolDefaultAwsAvailability +// The set of AWS availability types supported when setting up nodes for a +// cluster. type InstancePoolAwsAttributesAvailability string const InstancePoolAwsAttributesAvailabilityOnDemand InstancePoolAwsAttributesAvailability = `ON_DEMAND` @@ -3725,14 +3731,16 @@ func (f *InstancePoolAwsAttributesAvailability) Type() string { return "InstancePoolAwsAttributesAvailability" } +// Attributes set during instance pool creation which are related to Azure. type InstancePoolAzureAttributes struct { - // Shows the Availability type used for the spot nodes. - // - // The default value is defined by - // InstancePoolConf.instancePoolDefaultAzureAvailability + // Availability type used for the spot nodes. Availability InstancePoolAzureAttributesAvailability `json:"availability,omitempty"` - // The default value and documentation here should be kept consistent with - // CommonConf.defaultSpotBidMaxPrice. + // With variable pricing, you have option to set a max price, in US dollars + // (USD) For example, the value 2 would be a max price of $2.00 USD per + // hour. If you set the max price to be -1, the VM won't be evicted based on + // price. The price for the VM will be the current price for spot or the + // price for a standard VM, which ever is less, as long as there is capacity + // and quota available. SpotBidMaxPrice float64 `json:"spot_bid_max_price,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -3746,10 +3754,8 @@ func (s InstancePoolAzureAttributes) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Shows the Availability type used for the spot nodes. 
-// -// The default value is defined by -// InstancePoolConf.instancePoolDefaultAzureAvailability +// The set of Azure availability types supported when setting up nodes for a +// cluster. type InstancePoolAzureAttributesAvailability string const InstancePoolAzureAttributesAvailabilityOnDemandAzure InstancePoolAzureAttributesAvailability = `ON_DEMAND_AZURE` @@ -3777,6 +3783,7 @@ func (f *InstancePoolAzureAttributesAvailability) Type() string { return "InstancePoolAzureAttributesAvailability" } +// Attributes set during instance pool creation which are related to GCP. type InstancePoolGcpAttributes struct { // This field determines whether the instance pool will contain preemptible // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs @@ -3904,7 +3911,11 @@ type InstancePoolPermissionsRequest struct { InstancePoolId string `json:"-" url:"-"` } -// Current state of the instance pool. +// The state of a Cluster. The current allowable state transitions are as +// follows: +// +// - “ACTIVE“ -> “STOPPED“ - “ACTIVE“ -> “DELETED“ - “STOPPED“ -> +// “ACTIVE“ - “STOPPED“ -> “DELETED“ type InstancePoolState string const InstancePoolStateActive InstancePoolState = `ACTIVE` @@ -4650,6 +4661,7 @@ func (s NodeType) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Error message of a failed pending instances type PendingInstanceError struct { InstanceId string `json:"instance_id,omitempty"` diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 1f7df53c2..5355772f0 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -35,7 +35,13 @@ type GenieInterface interface { // Deprecated: use [GenieAPIInterface.CreateMessage].Get() or [GenieAPIInterface.WaitGetMessageGenieCompleted] CreateMessageAndWait(ctx context.Context, genieCreateConversationMessageRequest GenieCreateConversationMessageRequest, options ...retries.Option[GenieMessage]) (*GenieMessage, error) - // Execute SQL query in a conversation message. 
+ // Execute message attachment SQL query. + // + // Execute the SQL for a message query attachment. Use this API when the query + // attachment has expired and needs to be re-executed. + ExecuteMessageAttachmentQuery(ctx context.Context, request GenieExecuteMessageAttachmentQueryRequest) (*GenieGetMessageQueryResultResponse, error) + + // [Deprecated] Execute SQL query in a conversation message. // // Execute the SQL query in the message. ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) @@ -50,6 +56,20 @@ type GenieInterface interface { // Get message from conversation. GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieMessage, error) + // Get message attachment SQL query result. + // + // Get the result of SQL query if the message has a query attachment. This is + // only available if a message has a query attachment and the message status is + // `EXECUTING_QUERY` OR `COMPLETED`. + GetMessageAttachmentQueryResult(ctx context.Context, request GenieGetMessageAttachmentQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) + + // Get message attachment SQL query result. + // + // Get the result of SQL query if the message has a query attachment. This is + // only available if a message has a query attachment and the message status is + // `EXECUTING_QUERY` OR `COMPLETED`. + GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is @@ -64,28 +84,28 @@ type GenieInterface interface { // `EXECUTING_QUERY`. 
GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(ctx context.Context, spaceId string, conversationId string, messageId string) (*GenieGetMessageQueryResultResponse, error) - // Get conversation message SQL query result. + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is // only available if a message has a query attachment and the message status is // `EXECUTING_QUERY` OR `COMPLETED`. GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) - // Get conversation message SQL query result. + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is // only available if a message has a query attachment and the message status is // `EXECUTING_QUERY` OR `COMPLETED`. GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) - // Get details of a Genie Space. + // Get Genie Space. // - // Get a Genie Space. + // Get details of a Genie Space. GetSpace(ctx context.Context, request GenieGetSpaceRequest) (*GenieSpace, error) - // Get details of a Genie Space. + // Get Genie Space. // - // Get a Genie Space. + // Get details of a Genie Space. GetSpaceBySpaceId(ctx context.Context, spaceId string) (*GenieSpace, error) // Start conversation. @@ -238,6 +258,20 @@ func (a *GenieAPI) GetMessageBySpaceIdAndConversationIdAndMessageId(ctx context. }) } +// Get message attachment SQL query result. +// +// Get the result of SQL query if the message has a query attachment. This is +// only available if a message has a query attachment and the message status is +// `EXECUTING_QUERY` OR `COMPLETED`. 
+func (a *GenieAPI) GetMessageAttachmentQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string) (*GenieGetMessageQueryResultResponse, error) { + return a.genieImpl.GetMessageAttachmentQueryResult(ctx, GenieGetMessageAttachmentQueryResultRequest{ + SpaceId: spaceId, + ConversationId: conversationId, + MessageId: messageId, + AttachmentId: attachmentId, + }) +} + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is @@ -251,7 +285,7 @@ func (a *GenieAPI) GetMessageQueryResultBySpaceIdAndConversationIdAndMessageId(c }) } -// Get conversation message SQL query result. +// [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This is // only available if a message has a query attachment and the message status is @@ -265,9 +299,9 @@ func (a *GenieAPI) GetMessageQueryResultByAttachmentBySpaceIdAndConversationIdAn }) } -// Get details of a Genie Space. +// Get Genie Space. // -// Get a Genie Space. +// Get details of a Genie Space. 
func (a *GenieAPI) GetSpaceBySpaceId(ctx context.Context, spaceId string) (*GenieSpace, error) { return a.genieImpl.GetSpace(ctx, GenieGetSpaceRequest{ SpaceId: spaceId, diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index e7d46e03e..8bff281d2 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -28,6 +28,16 @@ func (a *genieImpl) CreateMessage(ctx context.Context, request GenieCreateConver return &genieMessage, err } +func (a *genieImpl) ExecuteMessageAttachmentQuery(ctx context.Context, request GenieExecuteMessageAttachmentQueryRequest) (*GenieGetMessageQueryResultResponse, error) { + var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse + path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/execute-query", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &genieGetMessageQueryResultResponse) + return &genieGetMessageQueryResultResponse, err +} + func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/execute-query", request.SpaceId, request.ConversationId, request.MessageId) @@ -48,6 +58,16 @@ func (a *genieImpl) GetMessage(ctx context.Context, request GenieGetConversation return &genieMessage, err } +func (a *genieImpl) GetMessageAttachmentQueryResult(ctx context.Context, request GenieGetMessageAttachmentQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) { + var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse + path := 
fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/query-result", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieGetMessageQueryResultResponse) + return &genieGetMessageQueryResultResponse, err +} + func (a *genieImpl) GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) { var genieGetMessageQueryResultResponse GenieGetMessageQueryResultResponse path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/query-result", request.SpaceId, request.ConversationId, request.MessageId) diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index 0c8eb4947..a53486dc4 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -20,7 +20,13 @@ type GenieService interface { // to respond. CreateMessage(ctx context.Context, request GenieCreateConversationMessageRequest) (*GenieMessage, error) - // Execute SQL query in a conversation message. + // Execute message attachment SQL query. + // + // Execute the SQL for a message query attachment. Use this API when the + // query attachment has expired and needs to be re-executed. + ExecuteMessageAttachmentQuery(ctx context.Context, request GenieExecuteMessageAttachmentQueryRequest) (*GenieGetMessageQueryResultResponse, error) + + // [Deprecated] Execute SQL query in a conversation message. // // Execute the SQL query in the message. ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) @@ -30,6 +36,13 @@ type GenieService interface { // Get message from conversation. 
GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) + // Get message attachment SQL query result. + // + // Get the result of SQL query if the message has a query attachment. This + // is only available if a message has a query attachment and the message + // status is `EXECUTING_QUERY` OR `COMPLETED`. + GetMessageAttachmentQueryResult(ctx context.Context, request GenieGetMessageAttachmentQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This @@ -37,16 +50,16 @@ type GenieService interface { // status is `EXECUTING_QUERY`. GetMessageQueryResult(ctx context.Context, request GenieGetMessageQueryResultRequest) (*GenieGetMessageQueryResultResponse, error) - // Get conversation message SQL query result. + // [Deprecated] Get conversation message SQL query result. // // Get the result of SQL query if the message has a query attachment. This // is only available if a message has a query attachment and the message // status is `EXECUTING_QUERY` OR `COMPLETED`. GetMessageQueryResultByAttachment(ctx context.Context, request GenieGetQueryResultByAttachmentRequest) (*GenieGetMessageQueryResultResponse, error) - // Get details of a Genie Space. + // Get Genie Space. // - // Get a Genie Space. + // Get details of a Genie Space. GetSpace(ctx context.Context, request GenieGetSpaceRequest) (*GenieSpace, error) // Start conversation. 
diff --git a/service/dashboards/model.go b/service/dashboards/model.go index dd52ed9da..64074e89d 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -282,7 +282,19 @@ type GenieCreateConversationMessageRequest struct { SpaceId string `json:"-" url:"-"` } -// Execute SQL query in a conversation message +// Execute message attachment SQL query +type GenieExecuteMessageAttachmentQueryRequest struct { + // Attachment ID + AttachmentId string `json:"-" url:"-"` + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Genie space ID + SpaceId string `json:"-" url:"-"` +} + +// [Deprecated] Execute SQL query in a conversation message type GenieExecuteMessageQueryRequest struct { // Conversation ID ConversationId string `json:"-" url:"-"` @@ -304,6 +316,18 @@ type GenieGetConversationMessageRequest struct { SpaceId string `json:"-" url:"-"` } +// Get message attachment SQL query result +type GenieGetMessageAttachmentQueryResultRequest struct { + // Attachment ID + AttachmentId string `json:"-" url:"-"` + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Genie space ID + SpaceId string `json:"-" url:"-"` +} + // [Deprecated] Get conversation message SQL query result type GenieGetMessageQueryResultRequest struct { // Conversation ID @@ -320,7 +344,7 @@ type GenieGetMessageQueryResultResponse struct { StatementResponse *sql.StatementResponse `json:"statement_response,omitempty"` } -// Get conversation message SQL query result +// [Deprecated] Get conversation message SQL query result type GenieGetQueryResultByAttachmentRequest struct { // Attachment ID AttachmentId string `json:"-" url:"-"` @@ -332,7 +356,7 @@ type GenieGetQueryResultByAttachmentRequest struct { SpaceId string `json:"-" url:"-"` } -// Get details of a Genie Space +// Get Genie Space type GenieGetSpaceRequest struct { // The ID associated with the 
Genie space SpaceId string `json:"-" url:"-"` @@ -368,14 +392,16 @@ type GenieMessage struct { // for warehouse before the SQL query can start executing. * // `EXECUTING_QUERY`: Executing a generated SQL query. Get the SQL query // result by calling - // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * - // `FAILED`: The response generation or query execution failed. See `error` - // field. * `COMPLETED`: Message processing is completed. Results are in the - // `attachments` field. Get the SQL query result by calling - // [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * - // `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL - // result is not available anymore. The user needs to rerun the query. * - // `CANCELLED`: Message has been cancelled. + // [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) + // API. * `FAILED`: The response generation or query execution failed. See + // `error` field. * `COMPLETED`: Message processing is completed. Results + // are in the `attachments` field. Get the SQL query result by calling + // [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) + // API. * `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: + // SQL result is not available anymore. The user needs to rerun the query. + // Rerun the SQL query result by calling + // [executeMessageAttachmentQuery](:method:genie/executeMessageAttachmentQuery) + // API. * `CANCELLED`: Message has been cancelled. Status MessageStatus `json:"status,omitempty"` // ID of the user who created the message UserId int64 `json:"user_id,omitempty"` @@ -402,6 +428,10 @@ type GenieQueryAttachment struct { Query string `json:"query,omitempty"` // Metadata associated with the query result. QueryResultMetadata *GenieResultMetadata `json:"query_result_metadata,omitempty"` + // Statement Execution API statement id. 
Use [Get status, manifest, and + // result first chunk](:method:statementexecution/getstatement) to get the + // full result data. + StatementId string `json:"statement_id,omitempty"` // Name of the query Title string `json:"title,omitempty"` @@ -689,6 +719,8 @@ const MessageErrorTypeContentFilterException MessageErrorType = `CONTENT_FILTER_ const MessageErrorTypeContextExceededException MessageErrorType = `CONTEXT_EXCEEDED_EXCEPTION` +const MessageErrorTypeCouldNotGetModelDeploymentsException MessageErrorType = `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION` + const MessageErrorTypeCouldNotGetUcSchemaException MessageErrorType = `COULD_NOT_GET_UC_SCHEMA_EXCEPTION` const MessageErrorTypeDeploymentNotFoundException MessageErrorType = `DEPLOYMENT_NOT_FOUND_EXCEPTION` @@ -771,11 +803,11 @@ func (f *MessageErrorType) String() string { // Set raw string value and validate it against allowed values func (f *MessageErrorType) Set(v string) error { switch v { - case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, 
`MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, 
`REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: *f = MessageErrorType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", 
"UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) } } @@ -790,14 +822,16 @@ func (f *MessageErrorType) Type() string { // respond to the user's question. 
* `PENDING_WAREHOUSE`: Waiting for warehouse // before the SQL query can start executing. * `EXECUTING_QUERY`: Executing a // generated SQL query. Get the SQL query result by calling -// [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * `FAILED`: -// The response generation or query execution failed. See `error` field. * -// `COMPLETED`: Message processing is completed. Results are in the -// `attachments` field. Get the SQL query result by calling -// [getMessageQueryResult](:method:genie/getMessageQueryResult) API. * -// `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL result -// is not available anymore. The user needs to rerun the query. * `CANCELLED`: -// Message has been cancelled. +// [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) +// API. * `FAILED`: The response generation or query execution failed. See +// `error` field. * `COMPLETED`: Message processing is completed. Results are in +// the `attachments` field. Get the SQL query result by calling +// [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) +// API. * `SUBMITTED`: Message has been submitted. * `QUERY_RESULT_EXPIRED`: SQL +// result is not available anymore. The user needs to rerun the query. Rerun the +// SQL query result by calling +// [executeMessageAttachmentQuery](:method:genie/executeMessageAttachmentQuery) +// API. * `CANCELLED`: Message has been cancelled. type MessageStatus string // Waiting for the LLM to respond to the user's question. @@ -808,11 +842,13 @@ const MessageStatusCancelled MessageStatus = `CANCELLED` // Message processing is completed. Results are in the `attachments` field. Get // the SQL query result by calling -// [getMessageQueryResult](:method:genie/getMessageQueryResult) API. +// [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) +// API. const MessageStatusCompleted MessageStatus = `COMPLETED` // Executing a generated SQL query. 
Get the SQL query result by calling -// [getMessageQueryResult](:method:genie/getMessageQueryResult) API. +// [getMessageAttachmentQueryResult](:method:genie/getMessageAttachmentQueryResult) +// API. const MessageStatusExecutingQuery MessageStatus = `EXECUTING_QUERY` // The response generation or query execution failed. See `error` field. @@ -827,7 +863,10 @@ const MessageStatusFilteringContext MessageStatus = `FILTERING_CONTEXT` // Waiting for warehouse before the SQL query can start executing. const MessageStatusPendingWarehouse MessageStatus = `PENDING_WAREHOUSE` -// SQL result is not available anymore. The user needs to rerun the query. +// SQL result is not available anymore. The user needs to rerun the query. Rerun +// the SQL query result by calling +// [executeMessageAttachmentQuery](:method:genie/executeMessageAttachmentQuery) +// API. const MessageStatusQueryResultExpired MessageStatus = `QUERY_RESULT_EXPIRED` // Message has been submitted. diff --git a/service/files/impl.go b/service/files/impl.go index 84a9001e6..85b5ae279 100755 --- a/service/files/impl.go +++ b/service/files/impl.go @@ -269,8 +269,7 @@ func (a *filesImpl) ListDirectoryContents(ctx context.Context, request ListDirec // specified path, the API returns a HTTP 404 error. 
func (a *filesImpl) ListDirectoryContentsAll(ctx context.Context, request ListDirectoryContentsRequest) ([]DirectoryEntry, error) { iterator := a.ListDirectoryContents(ctx, request) - return listing.ToSliceN[DirectoryEntry, int64](ctx, iterator, request.PageSize) - + return listing.ToSlice[DirectoryEntry](ctx, iterator) } func (a *filesImpl) internalListDirectoryContents(ctx context.Context, request ListDirectoryContentsRequest) (*ListDirectoryResponse, error) { diff --git a/service/files/model.go b/service/files/model.go index 71b3939f6..1205e1281 100755 --- a/service/files/model.go +++ b/service/files/model.go @@ -137,12 +137,13 @@ type DownloadRequest struct { } type DownloadResponse struct { + // The length of the HTTP response body in bytes. ContentLength int64 `json:"-" url:"-" header:"content-length,omitempty"` ContentType string `json:"-" url:"-" header:"content-type,omitempty"` Contents io.ReadCloser `json:"-"` - + // The last modified time of the file in HTTP-date (RFC 7231) format. LastModified string `json:"-" url:"-" header:"last-modified,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -193,10 +194,11 @@ type GetMetadataRequest struct { } type GetMetadataResponse struct { + // The length of the HTTP response body in bytes. ContentLength int64 `json:"-" url:"-" header:"content-length,omitempty"` ContentType string `json:"-" url:"-" header:"content-type,omitempty"` - + // The last modified time of the file in HTTP-date (RFC 7231) format. 
LastModified string `json:"-" url:"-" header:"last-modified,omitempty"` ForceSendFields []string `json:"-" url:"-"` diff --git a/service/jobs/impl.go b/service/jobs/impl.go index 53a9816a5..66b4ddc00 100755 --- a/service/jobs/impl.go +++ b/service/jobs/impl.go @@ -19,10 +19,9 @@ type jobsImpl struct { func (a *jobsImpl) CancelAllRuns(ctx context.Context, request CancelAllRuns) error { var cancelAllRunsResponse CancelAllRunsResponse - path := "/api/2.1/jobs/runs/cancel-all" + path := "/api/2.2/jobs/runs/cancel-all" queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelAllRunsResponse) return err @@ -30,10 +29,9 @@ func (a *jobsImpl) CancelAllRuns(ctx context.Context, request CancelAllRuns) err func (a *jobsImpl) CancelRun(ctx context.Context, request CancelRun) error { var cancelRunResponse CancelRunResponse - path := "/api/2.1/jobs/runs/cancel" + path := "/api/2.2/jobs/runs/cancel" queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &cancelRunResponse) return err @@ -41,7 +39,7 @@ func (a *jobsImpl) CancelRun(ctx context.Context, request CancelRun) error { func (a *jobsImpl) Create(ctx context.Context, request CreateJob) (*CreateResponse, error) { var createResponse CreateResponse - path := "/api/2.1/jobs/create" + path := "/api/2.2/jobs/create" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -52,10 +50,9 @@ func (a *jobsImpl) Create(ctx context.Context, request CreateJob) (*CreateRespon func (a *jobsImpl) Delete(ctx context.Context, request DeleteJob) error { var deleteResponse DeleteResponse - path := "/api/2.1/jobs/delete" + path := 
"/api/2.2/jobs/delete" queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteResponse) return err @@ -63,10 +60,9 @@ func (a *jobsImpl) Delete(ctx context.Context, request DeleteJob) error { func (a *jobsImpl) DeleteRun(ctx context.Context, request DeleteRun) error { var deleteRunResponse DeleteRunResponse - path := "/api/2.1/jobs/runs/delete" + path := "/api/2.2/jobs/runs/delete" queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteRunResponse) return err @@ -74,7 +70,7 @@ func (a *jobsImpl) DeleteRun(ctx context.Context, request DeleteRun) error { func (a *jobsImpl) ExportRun(ctx context.Context, request ExportRunRequest) (*ExportRunOutput, error) { var exportRunOutput ExportRunOutput - path := "/api/2.1/jobs/runs/export" + path := "/api/2.2/jobs/runs/export" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -84,7 +80,7 @@ func (a *jobsImpl) ExportRun(ctx context.Context, request ExportRunRequest) (*Ex func (a *jobsImpl) Get(ctx context.Context, request GetJobRequest) (*Job, error) { var job Job - path := "/api/2.1/jobs/get" + path := "/api/2.2/jobs/get" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -114,7 +110,7 @@ func (a *jobsImpl) GetPermissions(ctx context.Context, request GetJobPermissions func (a *jobsImpl) GetRun(ctx context.Context, request GetRunRequest) (*Run, error) { var run Run - path := "/api/2.1/jobs/runs/get" + path := "/api/2.2/jobs/runs/get" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = 
"application/json" @@ -124,7 +120,7 @@ func (a *jobsImpl) GetRun(ctx context.Context, request GetRunRequest) (*Run, err func (a *jobsImpl) GetRunOutput(ctx context.Context, request GetRunOutputRequest) (*RunOutput, error) { var runOutput RunOutput - path := "/api/2.1/jobs/runs/get-output" + path := "/api/2.2/jobs/runs/get-output" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -169,7 +165,7 @@ func (a *jobsImpl) ListAll(ctx context.Context, request ListJobsRequest) ([]Base func (a *jobsImpl) internalList(ctx context.Context, request ListJobsRequest) (*ListJobsResponse, error) { var listJobsResponse ListJobsResponse - path := "/api/2.1/jobs/list" + path := "/api/2.2/jobs/list" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -214,7 +210,7 @@ func (a *jobsImpl) ListRunsAll(ctx context.Context, request ListRunsRequest) ([] func (a *jobsImpl) internalListRuns(ctx context.Context, request ListRunsRequest) (*ListRunsResponse, error) { var listRunsResponse ListRunsResponse - path := "/api/2.1/jobs/runs/list" + path := "/api/2.2/jobs/runs/list" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -224,7 +220,7 @@ func (a *jobsImpl) internalListRuns(ctx context.Context, request ListRunsRequest func (a *jobsImpl) RepairRun(ctx context.Context, request RepairRun) (*RepairRunResponse, error) { var repairRunResponse RepairRunResponse - path := "/api/2.1/jobs/runs/repair" + path := "/api/2.2/jobs/runs/repair" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -235,10 +231,9 @@ func (a *jobsImpl) RepairRun(ctx context.Context, request RepairRun) (*RepairRun func (a *jobsImpl) Reset(ctx context.Context, request ResetJob) error { var resetResponse ResetResponse - path := "/api/2.1/jobs/reset" + path := "/api/2.2/jobs/reset" queryParams 
:= make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &resetResponse) return err @@ -246,7 +241,7 @@ func (a *jobsImpl) Reset(ctx context.Context, request ResetJob) error { func (a *jobsImpl) RunNow(ctx context.Context, request RunNow) (*RunNowResponse, error) { var runNowResponse RunNowResponse - path := "/api/2.1/jobs/run-now" + path := "/api/2.2/jobs/run-now" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -268,7 +263,7 @@ func (a *jobsImpl) SetPermissions(ctx context.Context, request JobPermissionsReq func (a *jobsImpl) Submit(ctx context.Context, request SubmitRun) (*SubmitRunResponse, error) { var submitRunResponse SubmitRunResponse - path := "/api/2.1/jobs/runs/submit" + path := "/api/2.2/jobs/runs/submit" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -279,10 +274,9 @@ func (a *jobsImpl) Submit(ctx context.Context, request SubmitRun) (*SubmitRunRes func (a *jobsImpl) Update(ctx context.Context, request UpdateJob) error { var updateResponse UpdateResponse - path := "/api/2.1/jobs/update" + path := "/api/2.2/jobs/update" queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &updateResponse) return err diff --git a/service/marketplace/model.go b/service/marketplace/model.go index 1dda602a9..e4669a62d 100755 --- a/service/marketplace/model.go +++ b/service/marketplace/model.go @@ -20,6 +20,8 @@ type AddExchangeForListingResponse struct { type AssetType string +const AssetTypeAssetTypeApp AssetType = `ASSET_TYPE_APP` + const AssetTypeAssetTypeDataTable AssetType = `ASSET_TYPE_DATA_TABLE` 
const AssetTypeAssetTypeGitRepo AssetType = `ASSET_TYPE_GIT_REPO` @@ -40,11 +42,11 @@ func (f *AssetType) String() string { // Set raw string value and validate it against allowed values func (f *AssetType) Set(v string) error { switch v { - case `ASSET_TYPE_DATA_TABLE`, `ASSET_TYPE_GIT_REPO`, `ASSET_TYPE_MEDIA`, `ASSET_TYPE_MODEL`, `ASSET_TYPE_NOTEBOOK`, `ASSET_TYPE_PARTNER_INTEGRATION`: + case `ASSET_TYPE_APP`, `ASSET_TYPE_DATA_TABLE`, `ASSET_TYPE_GIT_REPO`, `ASSET_TYPE_MEDIA`, `ASSET_TYPE_MODEL`, `ASSET_TYPE_NOTEBOOK`, `ASSET_TYPE_PARTNER_INTEGRATION`: *f = AssetType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ASSET_TYPE_DATA_TABLE", "ASSET_TYPE_GIT_REPO", "ASSET_TYPE_MEDIA", "ASSET_TYPE_MODEL", "ASSET_TYPE_NOTEBOOK", "ASSET_TYPE_PARTNER_INTEGRATION"`, v) + return fmt.Errorf(`value "%s" is not one of "ASSET_TYPE_APP", "ASSET_TYPE_DATA_TABLE", "ASSET_TYPE_GIT_REPO", "ASSET_TYPE_MEDIA", "ASSET_TYPE_MODEL", "ASSET_TYPE_NOTEBOOK", "ASSET_TYPE_PARTNER_INTEGRATION"`, v) } } diff --git a/service/ml/api.go b/service/ml/api.go index ee8f4856d..99ecc63d5 100755 --- a/service/ml/api.go +++ b/service/ml/api.go @@ -1,13 +1,17 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Experiments, Model Registry, etc. +// These APIs allow you to manage Experiments, Forecasting, Model Registry, etc. 
package ml import ( "context" + "fmt" + "time" "github.com/databricks/databricks-sdk-go/client" "github.com/databricks/databricks-sdk-go/listing" + "github.com/databricks/databricks-sdk-go/retries" + "github.com/databricks/databricks-sdk-go/useragent" ) type ExperimentsInterface interface { @@ -367,6 +371,159 @@ func (a *ExperimentsAPI) GetPermissionsByExperimentId(ctx context.Context, exper }) } +type ForecastingInterface interface { + + // WaitGetExperimentForecastingSucceeded repeatedly calls [ForecastingAPI.GetExperiment] and waits to reach SUCCEEDED state + WaitGetExperimentForecastingSucceeded(ctx context.Context, experimentId string, + timeout time.Duration, callback func(*ForecastingExperiment)) (*ForecastingExperiment, error) + + // Create a forecasting experiment. + // + // Creates a serverless forecasting experiment. Returns the experiment ID. + CreateExperiment(ctx context.Context, createForecastingExperimentRequest CreateForecastingExperimentRequest) (*WaitGetExperimentForecastingSucceeded[CreateForecastingExperimentResponse], error) + + // Calls [ForecastingAPIInterface.CreateExperiment] and waits to reach SUCCEEDED state + // + // You can override the default timeout of 120 minutes by adding the + // retries.Timeout[ForecastingExperiment](60*time.Minute) functional option. + // + // Deprecated: use [ForecastingAPIInterface.CreateExperiment].Get() or [ForecastingAPIInterface.WaitGetExperimentForecastingSucceeded] + CreateExperimentAndWait(ctx context.Context, createForecastingExperimentRequest CreateForecastingExperimentRequest, options ...retries.Option[ForecastingExperiment]) (*ForecastingExperiment, error) + + // Get a forecasting experiment. 
+ // + // Public RPC to get forecasting experiment + GetExperimentByExperimentId(ctx context.Context, experimentId string) (*ForecastingExperiment, error) +} + +func NewForecasting(client *client.DatabricksClient) *ForecastingAPI { + return &ForecastingAPI{ + forecastingImpl: forecastingImpl{ + client: client, + }, + } +} + +// The Forecasting API allows you to create and get serverless forecasting +// experiments +type ForecastingAPI struct { + forecastingImpl +} + +// WaitGetExperimentForecastingSucceeded repeatedly calls [ForecastingAPI.GetExperiment] and waits to reach SUCCEEDED state +func (a *ForecastingAPI) WaitGetExperimentForecastingSucceeded(ctx context.Context, experimentId string, + timeout time.Duration, callback func(*ForecastingExperiment)) (*ForecastingExperiment, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "long-running") + return retries.Poll[ForecastingExperiment](ctx, timeout, func() (*ForecastingExperiment, *retries.Err) { + forecastingExperiment, err := a.GetExperiment(ctx, GetForecastingExperimentRequest{ + ExperimentId: experimentId, + }) + if err != nil { + return nil, retries.Halt(err) + } + if callback != nil { + callback(forecastingExperiment) + } + status := forecastingExperiment.State + statusMessage := fmt.Sprintf("current status: %s", status) + switch status { + case ForecastingExperimentStateSucceeded: // target state + return forecastingExperiment, nil + case ForecastingExperimentStateFailed, ForecastingExperimentStateCancelled: + err := fmt.Errorf("failed to reach %s, got %s: %s", + ForecastingExperimentStateSucceeded, status, statusMessage) + return nil, retries.Halt(err) + default: + return nil, retries.Continues(statusMessage) + } + }) +} + +// WaitGetExperimentForecastingSucceeded is a wrapper that calls [ForecastingAPI.WaitGetExperimentForecastingSucceeded] and waits to reach SUCCEEDED state. 
+type WaitGetExperimentForecastingSucceeded[R any] struct { + Response *R + ExperimentId string `json:"experiment_id"` + Poll func(time.Duration, func(*ForecastingExperiment)) (*ForecastingExperiment, error) + callback func(*ForecastingExperiment) + timeout time.Duration +} + +// OnProgress invokes a callback every time it polls for the status update. +func (w *WaitGetExperimentForecastingSucceeded[R]) OnProgress(callback func(*ForecastingExperiment)) *WaitGetExperimentForecastingSucceeded[R] { + w.callback = callback + return w +} + +// Get the ForecastingExperiment with the default timeout of 120 minutes. +func (w *WaitGetExperimentForecastingSucceeded[R]) Get() (*ForecastingExperiment, error) { + return w.Poll(w.timeout, w.callback) +} + +// Get the ForecastingExperiment with custom timeout. +func (w *WaitGetExperimentForecastingSucceeded[R]) GetWithTimeout(timeout time.Duration) (*ForecastingExperiment, error) { + return w.Poll(timeout, w.callback) +} + +// Create a forecasting experiment. +// +// Creates a serverless forecasting experiment. Returns the experiment ID. 
+func (a *ForecastingAPI) CreateExperiment(ctx context.Context, createForecastingExperimentRequest CreateForecastingExperimentRequest) (*WaitGetExperimentForecastingSucceeded[CreateForecastingExperimentResponse], error) { + createForecastingExperimentResponse, err := a.forecastingImpl.CreateExperiment(ctx, createForecastingExperimentRequest) + if err != nil { + return nil, err + } + return &WaitGetExperimentForecastingSucceeded[CreateForecastingExperimentResponse]{ + Response: createForecastingExperimentResponse, + ExperimentId: createForecastingExperimentResponse.ExperimentId, + Poll: func(timeout time.Duration, callback func(*ForecastingExperiment)) (*ForecastingExperiment, error) { + return a.WaitGetExperimentForecastingSucceeded(ctx, createForecastingExperimentResponse.ExperimentId, timeout, callback) + }, + timeout: 120 * time.Minute, + callback: nil, + }, nil +} + +// Calls [ForecastingAPI.CreateExperiment] and waits to reach SUCCEEDED state +// +// You can override the default timeout of 120 minutes by adding the +// retries.Timeout[ForecastingExperiment](60*time.Minute) functional option. +// +// Deprecated: use [ForecastingAPI.CreateExperiment].Get() or [ForecastingAPI.WaitGetExperimentForecastingSucceeded] +func (a *ForecastingAPI) CreateExperimentAndWait(ctx context.Context, createForecastingExperimentRequest CreateForecastingExperimentRequest, options ...retries.Option[ForecastingExperiment]) (*ForecastingExperiment, error) { + wait, err := a.CreateExperiment(ctx, createForecastingExperimentRequest) + if err != nil { + return nil, err + } + tmp := &retries.Info[ForecastingExperiment]{Timeout: 120 * time.Minute} + for _, o := range options { + o(tmp) + } + wait.timeout = tmp.Timeout + wait.callback = func(info *ForecastingExperiment) { + for _, o := range options { + o(&retries.Info[ForecastingExperiment]{ + Info: info, + Timeout: wait.timeout, + }) + } + } + return wait.Get() +} + +// Get a forecasting experiment. 
+// +// Public RPC to get forecasting experiment +func (a *ForecastingAPI) GetExperimentByExperimentId(ctx context.Context, experimentId string) (*ForecastingExperiment, error) { + return a.forecastingImpl.GetExperiment(ctx, GetForecastingExperimentRequest{ + ExperimentId: experimentId, + }) +} + type ModelRegistryInterface interface { // Approve transition request. diff --git a/service/ml/impl.go b/service/ml/impl.go index 974feeb5a..cb27bdc46 100755 --- a/service/ml/impl.go +++ b/service/ml/impl.go @@ -528,6 +528,32 @@ func (a *experimentsImpl) UpdateRun(ctx context.Context, request UpdateRun) (*Up return &updateRunResponse, err } +// unexported type that holds implementations of just forecasting API methods +type forecastingImpl struct { + client *client.DatabricksClient +} + +func (a *forecastingImpl) CreateExperiment(ctx context.Context, request CreateForecastingExperimentRequest) (*CreateForecastingExperimentResponse, error) { + var createForecastingExperimentResponse CreateForecastingExperimentResponse + path := "/api/2.0/automl/create-forecasting-experiment" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createForecastingExperimentResponse) + return &createForecastingExperimentResponse, err +} + +func (a *forecastingImpl) GetExperiment(ctx context.Context, request GetForecastingExperimentRequest) (*ForecastingExperiment, error) { + var forecastingExperiment ForecastingExperiment + path := fmt.Sprintf("/api/2.0/automl/get-forecasting-experiment/%v", request.ExperimentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &forecastingExperiment) + return &forecastingExperiment, err +} + // unexported type that holds 
implementations of just ModelRegistry API methods type modelRegistryImpl struct { client *client.DatabricksClient diff --git a/service/ml/interface.go b/service/ml/interface.go index 53f2f3f0b..2d90493d2 100755 --- a/service/ml/interface.go +++ b/service/ml/interface.go @@ -289,6 +289,21 @@ type ExperimentsService interface { UpdateRun(ctx context.Context, request UpdateRun) (*UpdateRunResponse, error) } +// The Forecasting API allows you to create and get serverless forecasting +// experiments +type ForecastingService interface { + + // Create a forecasting experiment. + // + // Creates a serverless forecasting experiment. Returns the experiment ID. + CreateExperiment(ctx context.Context, request CreateForecastingExperimentRequest) (*CreateForecastingExperimentResponse, error) + + // Get a forecasting experiment. + // + // Public RPC to get forecasting experiment + GetExperiment(ctx context.Context, request GetForecastingExperimentRequest) (*ForecastingExperiment, error) +} + // Note: This API reference documents APIs for the Workspace Model Registry. // Databricks recommends using [Models in Unity // Catalog](/api/workspace/registeredmodels) instead. Models in Unity Catalog diff --git a/service/ml/model.go b/service/ml/model.go index 04901af1b..338134ae3 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -319,6 +319,96 @@ func (s CreateExperimentResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type CreateForecastingExperimentRequest struct { + // Name of the column in the input training table used to customize the + // weight for each time series to calculate weighted metrics. + CustomWeightsColumn string `json:"custom_weights_column,omitempty"` + // The quantity of the input data granularity. Together with + // data_granularity_unit field, this defines the time interval between + // consecutive rows in the time series data. 
For now, only 1 second, + // 1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year + // are supported. + DataGranularityQuantity int64 `json:"data_granularity_quantity,omitempty"` + // The time unit of the input data granularity. Together with + // data_granularity_quantity field, this defines the time interval between + // consecutive rows in the time series data. Possible values: * 'W' (weeks) + // * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / + // 'min' / 'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / + // 'month' / 'months' * 'Q' / 'quarter' / 'quarters' * 'Y' / 'year' / + // 'years' + DataGranularityUnit string `json:"data_granularity_unit"` + // The path to the created experiment. This is the path where the experiment + // will be stored in the workspace. + ExperimentPath string `json:"experiment_path,omitempty"` + // The number of time steps into the future for which predictions should be + // made. This value represents a multiple of data_granularity_unit and + // data_granularity_quantity determining how far ahead the model will + // forecast. + ForecastHorizon int64 `json:"forecast_horizon"` + // Region code(s) to consider when automatically adding holiday features. + // When empty, no holiday features are added. Only supports 1 holiday region + // for now. + HolidayRegions []string `json:"holiday_regions,omitempty"` + // The maximum duration in minutes for which the experiment is allowed to + // run. If the experiment exceeds this time limit it will be stopped + // automatically. + MaxRuntime int64 `json:"max_runtime,omitempty"` + // The three-level (fully qualified) path to a unity catalog table. This + // table path serves to store the predictions. + PredictionDataPath string `json:"prediction_data_path,omitempty"` + // The evaluation metric used to optimize the forecasting model. 
+ PrimaryMetric string `json:"primary_metric,omitempty"` + // The three-level (fully qualified) path to a unity catalog model. This + // model path serves to store the best model. + RegisterTo string `json:"register_to,omitempty"` + // Name of the column in the input training table used for custom data + // splits. The values in this column must be "train", "validate", or "test" + // to indicate which split each row belongs to. + SplitColumn string `json:"split_column,omitempty"` + // Name of the column in the input training table that serves as the + // prediction target. The values in this column will be used as the ground + // truth for model training. + TargetColumn string `json:"target_column"` + // Name of the column in the input training table that represents the + // timestamp of each row. + TimeColumn string `json:"time_column"` + // Name of the column in the input training table used to group the dataset + // to predict individual time series + TimeseriesIdentifierColumns []string `json:"timeseries_identifier_columns,omitempty"` + // The three-level (fully qualified) name of a unity catalog table. This + // table serves as the training data for the forecasting model. + TrainDataPath string `json:"train_data_path"` + // The list of frameworks to include for model tuning. Possible values: + // 'Prophet', 'ARIMA', 'DeepAR'. An empty list will include all supported + // frameworks. 
+ TrainingFrameworks []string `json:"training_frameworks,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CreateForecastingExperimentRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateForecastingExperimentRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type CreateForecastingExperimentResponse struct { + // The unique ID of the created forecasting experiment + ExperimentId string `json:"experiment_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CreateForecastingExperimentResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateForecastingExperimentResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type CreateModelRequest struct { // Optional description for registered model. Description string `json:"description,omitempty"` @@ -953,6 +1043,60 @@ func (s FileInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Represents a forecasting experiment with its unique identifier, URL, and +// state. +type ForecastingExperiment struct { + // The unique ID for the forecasting experiment. + ExperimentId string `json:"experiment_id,omitempty"` + // The URL to the forecasting experiment page. + ExperimentPageUrl string `json:"experiment_page_url,omitempty"` + // The current state of the forecasting experiment. 
+ State ForecastingExperimentState `json:"state,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ForecastingExperiment) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ForecastingExperiment) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ForecastingExperimentState string + +const ForecastingExperimentStateCancelled ForecastingExperimentState = `CANCELLED` + +const ForecastingExperimentStateFailed ForecastingExperimentState = `FAILED` + +const ForecastingExperimentStatePending ForecastingExperimentState = `PENDING` + +const ForecastingExperimentStateRunning ForecastingExperimentState = `RUNNING` + +const ForecastingExperimentStateSucceeded ForecastingExperimentState = `SUCCEEDED` + +// String representation for [fmt.Print] +func (f *ForecastingExperimentState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ForecastingExperimentState) Set(v string) error { + switch v { + case `CANCELLED`, `FAILED`, `PENDING`, `RUNNING`, `SUCCEEDED`: + *f = ForecastingExperimentState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "CANCELLED", "FAILED", "PENDING", "RUNNING", "SUCCEEDED"`, v) + } +} + +// Type always returns ForecastingExperimentState to satisfy [pflag.Value] interface +func (f *ForecastingExperimentState) Type() string { + return "ForecastingExperimentState" +} + // Get an experiment by name type GetByNameRequest struct { // Name of the associated experiment. @@ -992,6 +1136,12 @@ type GetExperimentResponse struct { Experiment *Experiment `json:"experiment,omitempty"` } +// Get a forecasting experiment +type GetForecastingExperimentRequest struct { + // The unique ID of a forecasting experiment + ExperimentId string `json:"-" url:"-"` +} + // Get metric history for a run type GetHistoryRequest struct { // Maximum number of Metric records to return per paginated request. 
Default diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 38974449d..7b65cd092 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -550,10 +550,11 @@ type OidcFederationPolicy struct { // tokens. Issuer string `json:"issuer,omitempty"` // The public keys used to validate the signature of federated tokens, in - // JWKS format. If unspecified (recommended), Databricks automatically - // fetches the public keys from your issuer’s well known endpoint. - // Databricks strongly recommends relying on your issuer’s well known - // endpoint for discovering public keys. + // JWKS format. Most use cases should not need to specify this field. If + // jwks_uri and jwks_json are both unspecified (recommended), Databricks + // automatically fetches the public keys from your issuer’s well known + // endpoint. Databricks strongly recommends relying on your issuer’s well + // known endpoint for discovering public keys. JwksJson string `json:"jwks_json,omitempty"` // The required token subject, as specified in the subject claim of // federated tokens. Must be specified for service principal federation diff --git a/service/pkg.go b/service/pkg.go index c68cd1100..3d35d9288 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -52,10 +52,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. -// // - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. // +// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. 
// // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. @@ -281,6 +281,8 @@ // - [provisioning.WorkspacesAPI]: These APIs manage workspaces for this account. // // - [billing.BudgetsAPI]: These APIs manage budget configurations for this account. +// +// - [ml.ForecastingAPI]: The Forecasting API allows you to create and get serverless forecasting experiments. package service import ( @@ -337,8 +339,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *catalog.CredentialsAPI = nil _ *provisioning.CredentialsAPI = nil + _ *catalog.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil @@ -452,4 +454,5 @@ var ( _ *settings.WorkspaceConfAPI = nil _ *provisioning.WorkspacesAPI = nil _ *billing.BudgetsAPI = nil + _ *ml.ForecastingAPI = nil ) diff --git a/service/serving/impl.go b/service/serving/impl.go index b8498ef53..f0170053f 100755 --- a/service/serving/impl.go +++ b/service/serving/impl.go @@ -47,7 +47,6 @@ func (a *servingEndpointsImpl) Delete(ctx context.Context, request DeleteServing path := fmt.Sprintf("/api/2.0/serving-endpoints/%v", request.Name) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } diff --git a/service/serving/model.go b/service/serving/model.go index 424e151f7..8a0bb6b63 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -466,6 +466,8 @@ type CreateServingEndpoint struct { // external model and provisioned throughput endpoints are currently // supported. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` + // The budget policy to be applied to the serving endpoint. 
+ BudgetPolicyId string `json:"budget_policy_id,omitempty"` // The core config of the serving endpoint. Config *EndpointCoreConfigInput `json:"config,omitempty"` // The name of the serving endpoint. This field is required and must be @@ -1858,6 +1860,8 @@ type ServingEndpoint struct { // external model and provisioned throughput endpoints are currently // supported. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` + // The budget policy associated with the endpoint. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigSummary `json:"config,omitempty"` // The timestamp when the endpoint was created in Unix time. @@ -1938,6 +1942,8 @@ type ServingEndpointDetailed struct { // external model and provisioned throughput endpoints are currently // supported. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` + // The budget policy associated with the endpoint. + BudgetPolicyId string `json:"budget_policy_id,omitempty"` // The config that is currently being served by the endpoint. Config *EndpointCoreConfigOutput `json:"config,omitempty"` // The timestamp when the endpoint was created in Unix time. 
diff --git a/service/settings/impl.go b/service/settings/impl.go index ee2a08181..ec11fe6ac 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -33,7 +33,6 @@ func (a *accountIpAccessListsImpl) Delete(ctx context.Context, request DeleteAcc path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } @@ -93,7 +92,6 @@ func (a *accountIpAccessListsImpl) Replace(ctx context.Context, request ReplaceI path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) return err @@ -104,7 +102,6 @@ func (a *accountIpAccessListsImpl) Update(ctx context.Context, request UpdateIpA path := fmt.Sprintf("/api/2.0/accounts/%v/ip-access-lists/%v", a.client.ConfiguredAccountID(), request.IpAccessListId) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err @@ -534,7 +531,6 @@ func (a *ipAccessListsImpl) Delete(ctx context.Context, request DeleteIpAccessLi path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } @@ -594,7 +590,6 @@ func (a 
*ipAccessListsImpl) Replace(ctx context.Context, request ReplaceIpAccess path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &replaceResponse) return err @@ -605,7 +600,6 @@ func (a *ipAccessListsImpl) Update(ctx context.Context, request UpdateIpAccessLi path := fmt.Sprintf("/api/2.0/ip-access-lists/%v", request.IpAccessListId) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateResponse) return err @@ -958,7 +952,6 @@ func (a *tokenManagementImpl) Delete(ctx context.Context, request DeleteTokenMan path := fmt.Sprintf("/api/2.0/token-management/tokens/%v", request.TokenId) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } diff --git a/service/sharing/impl.go b/service/sharing/impl.go index c2aa1e251..c6d48d3fa 100755 --- a/service/sharing/impl.go +++ b/service/sharing/impl.go @@ -33,7 +33,6 @@ func (a *providersImpl) Delete(ctx context.Context, request DeleteProviderReques path := fmt.Sprintf("/api/2.1/unity-catalog/providers/%v", request.Name) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } @@ -219,7 +218,6 @@ func (a *recipientsImpl) Delete(ctx context.Context, request DeleteRecipientRequ path := fmt.Sprintf("/api/2.1/unity-catalog/recipients/%v", request.Name) queryParams := 
make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } @@ -340,7 +338,6 @@ func (a *sharesImpl) Delete(ctx context.Context, request DeleteShareRequest) err path := fmt.Sprintf("/api/2.1/unity-catalog/shares/%v", request.Name) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteResponse) return err } diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index 4fdb86ed0..4e5abe555 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -612,6 +612,7 @@ type QueryVectorIndexResponse struct { // [Optional] Token that can be used in `QueryVectorIndexNextPage` API to // get next page of results. If more than 1000 results satisfy the query, // they are returned in groups of 1000. Empty value means no more results. + // The maximum number of results that can be returned is 10,000. NextPageToken string `json:"next_page_token,omitempty"` // Data returned in the query result. Result *ResultData `json:"result,omitempty"` diff --git a/tagging.py b/tagging.py index c57621fb4..5504bdd0e 100644 --- a/tagging.py +++ b/tagging.py @@ -14,6 +14,7 @@ NEXT_CHANGELOG_FILE_NAME = "NEXT_CHANGELOG.md" CHANGELOG_FILE_NAME = "CHANGELOG.md" PACKAGE_FILE_NAME = ".package.json" +CODEGEN_FILE_NAME = ".codegen.json" """ This script tags the release of the SDKs using a combination of the GitHub API and Git commands. It reads the local repository to determine necessary changes, updates changelogs, and creates tags. @@ -153,14 +154,14 @@ def update_version_references(tag_info: TagInfo) -> None: Code references are defined in .package.json files. 
""" - # Load version patterns from '.package.json' file - package_file_path = os.path.join(os.getcwd(), tag_info.package.path, PACKAGE_FILE_NAME) + # Load version patterns from '.codegen.json' file at the top level of the repository + package_file_path = os.path.join(os.getcwd(), CODEGEN_FILE_NAME) with open(package_file_path, 'r') as file: package_file = json.load(file) version = package_file.get('version') if not version: - print(f"Version not found in .package.json. Nothing to update.") + print(f"`version` not found in .codegen.json. Nothing to update.") return # Update the versions diff --git a/workspace_client.go b/workspace_client.go index 1c34bd372..736262051 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -1132,6 +1132,10 @@ type WorkspaceClient struct { // This API allows updating known workspace settings for advanced users. WorkspaceConf settings.WorkspaceConfInterface + + // The Forecasting API allows you to create and get serverless forecasting + // experiments + Forecasting ml.ForecastingInterface } var ErrNotWorkspaceClient = errors.New("invalid Databricks Workspace configuration") @@ -1266,5 +1270,6 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { Workspace: workspace.NewWorkspace(databricksClient), WorkspaceBindings: catalog.NewWorkspaceBindings(databricksClient), WorkspaceConf: settings.NewWorkspaceConf(databricksClient), + Forecasting: ml.NewForecasting(databricksClient), }, nil } From 424a24bf8128015a9fe127c1cd412d1a8fe90c0f Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Tue, 11 Mar 2025 14:39:55 +0000 Subject: [PATCH 28/54] [Release] Release v0.60.0 ## Release v0.60.0 ### API Changes * Added [w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service. 
* Added `ExecuteMessageAttachmentQuery` and `GetMessageAttachmentQueryResult` methods for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. * Added `StatementId` field for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). * Added `BudgetPolicyId` field for [serving.CreateServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#CreateServingEndpoint). * Added `BudgetPolicyId` field for [serving.ServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpoint). * Added `BudgetPolicyId` field for [serving.ServingEndpointDetailed](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointDetailed). * Added `CouldNotGetModelDeploymentsException` enum value for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). --- .release_metadata.json | 2 +- CHANGELOG.md | 12 ++++++++++++ NEXT_CHANGELOG.md | 9 +-------- version/version.go | 2 +- 4 files changed, 15 insertions(+), 10 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 22a4039ad..36450aea5 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-03-03 15:48:14+0000" + "timestamp": "2025-03-11 14:39:51+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index c4b041750..2c8ac907c 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,17 @@ # Version changelog +## Release v0.60.0 + +### API Changes +* Added [w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service. 
+* Added `ExecuteMessageAttachmentQuery` and `GetMessageAttachmentQueryResult` methods for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Added `StatementId` field for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). +* Added `BudgetPolicyId` field for [serving.CreateServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#CreateServingEndpoint). +* Added `BudgetPolicyId` field for [serving.ServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpoint). +* Added `BudgetPolicyId` field for [serving.ServingEndpointDetailed](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointDetailed). +* Added `CouldNotGetModelDeploymentsException` enum value for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). + + ## Release v0.59.0 ### Bug Fixes diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 15fdcd0ce..50a58aac7 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,6 +1,6 @@ # NEXT CHANGELOG -## Release v0.60.0 +## Release v0.61.0 ### New Features and Improvements @@ -11,10 +11,3 @@ ### Internal Changes ### API Changes -* Added [w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service. -* Added `ExecuteMessageAttachmentQuery` and `GetMessageAttachmentQueryResult` methods for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. -* Added `StatementId` field for [dashboards.GenieQueryAttachment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieQueryAttachment). 
-* Added `BudgetPolicyId` field for [serving.CreateServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#CreateServingEndpoint). -* Added `BudgetPolicyId` field for [serving.ServingEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpoint). -* Added `BudgetPolicyId` field for [serving.ServingEndpointDetailed](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServingEndpointDetailed). -* Added `CouldNotGetModelDeploymentsException` enum value for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). diff --git a/version/version.go b/version/version.go index 18a61d2b0..6479f4d94 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.58.1" +const Version = "0.60.0" From c371ad8d967893cbb6de556e56f00beb94bac631 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Thu, 13 Mar 2025 14:12:53 +0100 Subject: [PATCH 29/54] Slightly simplify `CredentialsProvider` and add doc comments (#1169) ## What changes are proposed in this pull request? This PR is a noop that slightly refactors the `CredentialsProvider` and improves doc comments. ## How is this tested? Unit tests (no change) NO_CHANGELOG=true --- config/credentials/credentials.go | 22 ++++++++++------------ 1 file changed, 10 insertions(+), 12 deletions(-) diff --git a/config/credentials/credentials.go b/config/credentials/credentials.go index fe1041f15..1f692e8ab 100644 --- a/config/credentials/credentials.go +++ b/config/credentials/credentials.go @@ -6,25 +6,23 @@ import ( "golang.org/x/oauth2" ) -// CredentialsProvider is an interface for providing credentials to the client. -// Implementations of this interface should set the necessary headers on the request. 
+// CredentialsProvider represents anything that can set credentials, such as +// a token, in the headers of a request. type CredentialsProvider interface { - // SetHeaders sets the necessary headers on the request. + // SetHeaders sets the credential in the request's headers. SetHeaders(r *http.Request) error } -type credentialsProvider struct { - setHeaders func(r *http.Request) error -} +type credentialsProvider func(r *http.Request) error -func (c *credentialsProvider) SetHeaders(r *http.Request) error { - return c.setHeaders(r) +func (c credentialsProvider) SetHeaders(r *http.Request) error { + return c(r) } -func NewCredentialsProvider(visitor func(r *http.Request) error) CredentialsProvider { - return &credentialsProvider{ - setHeaders: visitor, - } +// NewCredentialsProvider returns a new CredentialsProvider that uses the +// provided function to set headers on the request. +func NewCredentialsProvider(f func(r *http.Request) error) CredentialsProvider { + return credentialsProvider(f) } // OAuthCredentialsProvider is a specialized CredentialsProvider uses and provides an OAuth token. From 3cbc19658888e607df1307d6ba4d9d78a04864b9 Mon Sep 17 00:00:00 2001 From: shreyas-goenka <88374338+shreyas-goenka@users.noreply.github.com> Date: Thu, 20 Mar 2025 21:58:59 +0530 Subject: [PATCH 30/54] Do not close all TCP connections on an HTTP error (#1188) ## What changes are proposed in this pull request? Closing the TCP pool on every error is unnecessary. The function was meant to be used for http clients that are short lived to avoid leaking resources (ref: https://github.com/golang/go/issues/26563). For the Go SDK, the HTTP client is long-lived, and thus, there's no reason to proactively close the TCP connection pool on errors. By default, the HTTP protocol maintains the health of its TCP connections (keep-alive messages) and the Go HTTP client automatically closes connections that have been idle for too long based on a timeout. ## How is this tested? 
Manually used this in a Databricks CLI binary and verified that the commands still work. NO_CHANGELOG=true --- httpclient/api_client.go | 2 -- 1 file changed, 2 deletions(-) diff --git a/httpclient/api_client.go b/httpclient/api_client.go index 8cfec0a01..80781d7b7 100644 --- a/httpclient/api_client.go +++ b/httpclient/api_client.go @@ -305,8 +305,6 @@ func (c *ApiClient) attempt( return &responseWrapper, nil } - // proactively release the connections in HTTP connection pool - c.httpClient.CloseIdleConnections() return c.handleError(ctx, err, requestBody) } } From c2e0b1c05034044387552e64978d8ee97317670c Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Thu, 20 Mar 2025 18:00:49 +0100 Subject: [PATCH 31/54] Use the same connection pool in all `ApiClient` by default. (#1190) ## What changes are proposed in this pull request? This PR changes `ApiClient` so that each instance uses the same `RoundTripper` by default. This change should improve performance in context where users rely on multiple instances of `ApiClient`. ## How is this tested? Normal CI. --- NEXT_CHANGELOG.md | 2 ++ httpclient/api_client.go | 22 +++++++++++++++------- 2 files changed, 17 insertions(+), 7 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 50a58aac7..4bcde6056 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -4,6 +4,8 @@ ### New Features and Improvements +- Instances of `ApiClient` now share the same connection pool by default ([PR #1190](https://github.com/databricks/databricks-sdk-go/pull/1190)). 
+ ### Bug Fixes ### Documentation diff --git a/httpclient/api_client.go b/httpclient/api_client.go index 80781d7b7..7b2151717 100644 --- a/httpclient/api_client.go +++ b/httpclient/api_client.go @@ -42,10 +42,9 @@ type ClientConfig struct { Transport http.RoundTripper } -func (cfg ClientConfig) httpTransport() http.RoundTripper { - if cfg.Transport != nil { - return cfg.Transport - } +var defaultTransport = makeDefaultTransport() + +func makeDefaultTransport() *http.Transport { return &http.Transport{ Proxy: http.ProxyFromEnvironment, DialContext: (&net.Dialer{ @@ -58,12 +57,21 @@ func (cfg ClientConfig) httpTransport() http.RoundTripper { IdleConnTimeout: 180 * time.Second, TLSHandshakeTimeout: 30 * time.Second, ExpectContinueTimeout: 1 * time.Second, - TLSClientConfig: &tls.Config{ - InsecureSkipVerify: cfg.InsecureSkipVerify, - }, } } +func (cfg ClientConfig) httpTransport() http.RoundTripper { + if cfg.Transport != nil { + return cfg.Transport + } + if cfg.InsecureSkipVerify { + t := makeDefaultTransport() + t.TLSClientConfig = &tls.Config{InsecureSkipVerify: true} + return t + } + return defaultTransport +} + func NewApiClient(cfg ClientConfig) *ApiClient { cfg.HTTPTimeout = time.Duration(orDefault(int(cfg.HTTPTimeout), int(30*time.Second))) cfg.DebugTruncateBytes = orDefault(cfg.DebugTruncateBytes, 96) From e16cb324449859d4667373bac7ea637e2d4e5200 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Fri, 21 Mar 2025 11:11:20 +0100 Subject: [PATCH 32/54] Stop recommending opening an issue in case of unknown error (#1189) ## What changes are proposed in this pull request? This PR refactors the error parsing logic so that the SDK stops recommending reporting an issue in case of unexpected error. The rationale is that we do not have control over the error types that middleware might return. It is thus not possible to reliably identify if a parsing issue is due to a limitation of the SDK or to the error itself. This PR also slightly simplifies the code. 
## How is this tested? Normal CI. --- NEXT_CHANGELOG.md | 3 + apierr/errors.go | 161 ++++++++++++----------------------------- apierr/errors_test.go | 8 +- httpclient/response.go | 21 +++++- 4 files changed, 72 insertions(+), 121 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 4bcde6056..36350246f 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -12,4 +12,7 @@ ### Internal Changes +- Stop recommending users to report an issue when the SDK encounters an unknown + error ([PR #1189](https://github.com/databricks/databricks-sdk-go/pull/1189)). + ### API Changes diff --git a/apierr/errors.go b/apierr/errors.go index 2ea8e2a39..df17eabe3 100644 --- a/apierr/errors.go +++ b/apierr/errors.go @@ -7,13 +7,11 @@ import ( "fmt" "io" "net/http" - "net/url" "regexp" "strings" "github.com/databricks/databricks-sdk-go/common" "github.com/databricks/databricks-sdk-go/logger" - "github.com/databricks/databricks-sdk-go/logger/httplog" ) // Deprecated: Use [ErrorDetails] instead. @@ -58,6 +56,16 @@ func IsMissing(err error) bool { return errors.Is(err, ErrNotFound) } +// IsMissing tells if it is missing resource. +func (apiError *APIError) IsMissing() bool { + return errors.Is(apiError, ErrNotFound) +} + +// IsTooManyRequests shows rate exceeded limits. +func (apiError *APIError) IsTooManyRequests() bool { + return errors.Is(apiError, ErrTooManyRequests) +} + // GetErrorInfo returns all entries in the list of error details of type // `ErrorInfo`. // @@ -77,16 +85,6 @@ func GetErrorInfo(err error) []ErrorDetail { return filteredDetails } -// IsMissing tells if it is missing resource. -func (apiError *APIError) IsMissing() bool { - return errors.Is(apiError, ErrNotFound) -} - -// IsTooManyRequests shows rate exceeded limits. -func (apiError *APIError) IsTooManyRequests() bool { - return errors.Is(apiError, ErrTooManyRequests) -} - // IsRetriable returns true if error is retriable. 
func (apiError *APIError) IsRetriable(ctx context.Context) bool { if apiError.IsTooManyRequests() { @@ -109,93 +107,61 @@ func (apiError *APIError) IsRetriable(ctx context.Context) bool { return false } -// NotFound returns properly formatted Not Found error. -func NotFound(message string) *APIError { - return &APIError{ - ErrorCode: "NOT_FOUND", - StatusCode: 404, - Message: message, - } -} - -func ReadError(statusCode int, err error) *APIError { - return &APIError{ - ErrorCode: "IO_READ", - StatusCode: statusCode, - Message: err.Error(), - } -} - -func TooManyRequests() *APIError { - return &APIError{ - ErrorCode: "TOO_MANY_REQUESTS", - StatusCode: 429, - Message: "Current request has to be retried", - } -} - -func GenericIOError(ue *url.Error) *APIError { - return &APIError{ - ErrorCode: "IO_ERROR", - StatusCode: 523, - Message: ue.Error(), - } -} - -// GetAPIError inspects HTTP errors from the Databricks API for known transient -// errors. +// GetAPIError returns the API error from the response. If the response is not +// an error, it returns nil. func GetAPIError(ctx context.Context, resp common.ResponseWrapper) error { - if resp.Response.StatusCode == 429 { - return TooManyRequests() - } + // Responses in the 2xx and 3xx ranges are not standard Databricks errors. if resp.Response.StatusCode >= 400 { - // read in response body as it is actually an error - responseBodyBytes, err := io.ReadAll(resp.ReadCloser) - if err != nil { - return ReadError(resp.Response.StatusCode, err) - } - apiError := parseErrorFromResponse(ctx, resp.Response, resp.RequestBody.DebugBytes, responseBodyBytes) + apiError := parseErrorFromResponse(ctx, resp) applyOverrides(ctx, apiError, resp.Response) return apiError } - // Attempts to access private link workspaces are redirected to the login page with a specific query parameter. + + // Return an error if the response indicates that the request tried to + // access a private link workspace without proper access. 
requestUrl := resp.Response.Request.URL if isPrivateLinkRedirect(requestUrl) { return privateLinkValidationError(requestUrl) } - return nil -} -// errorParser attempts to parse the error from the response body. If successful, -// it returns a non-nil *APIError. It returns nil if parsing fails or no error is found. -type errorParser func(context.Context, *http.Response, []byte) *APIError - -// errorParsers is a list of errorParser functions that are tried in order to -// parse an API error from a response body. Most errors should be parsable by -// the standardErrorParser, but additional parsers can be added here for -// specific error formats. The order of the parsers is not important, as the set -// of errors that can be parsed by each parser should be disjoint. -var errorParsers = []errorParser{ - standardErrorParser, - stringErrorParser, - htmlErrorParser, + return nil // not an error } -func parseErrorFromResponse(ctx context.Context, resp *http.Response, requestBody, responseBody []byte) *APIError { - if len(responseBody) == 0 { +func parseErrorFromResponse(ctx context.Context, resp common.ResponseWrapper) *APIError { + errorBody, err := io.ReadAll(resp.ReadCloser) + if err != nil { return &APIError{ - Message: http.StatusText(resp.StatusCode), - StatusCode: resp.StatusCode, + ErrorCode: "IO_READ", + StatusCode: resp.Response.StatusCode, + Message: err.Error(), } } - for _, parser := range errorParsers { - if apiError := parser(ctx, resp, responseBody); apiError != nil { - return apiError + if len(errorBody) == 0 { + return &APIError{ + Message: http.StatusText(resp.Response.StatusCode), + StatusCode: resp.Response.StatusCode, } } - return unknownAPIError(resp, requestBody, responseBody) + if err := standardErrorParser(ctx, resp.Response, errorBody); err != nil { + return err + } + if err := stringErrorParser(ctx, resp.Response, errorBody); err != nil { + return err + } + if err := htmlErrorParser(ctx, resp.Response, errorBody); err != nil { + return err + } + 
+ // Unknown error response typically come from API gateways, load balancers, + // and other middlewares. These responses are not expected to be standard + // Databricks API errors. + return &APIError{ + ErrorCode: "UNKNOWN", + StatusCode: resp.Response.StatusCode, + Message: string(errorBody), + } } // standardErrorParser is the default error parser for Databricks API errors. @@ -309,38 +275,3 @@ func htmlErrorParser(ctx context.Context, resp *http.Response, responseBody []by return apiErr } - -// unknownAPIError is a fallback error parser for unexpected error formats. -func unknownAPIError(resp *http.Response, requestBody, responseBody []byte) *APIError { - apiErr := &APIError{ - StatusCode: resp.StatusCode, - Message: "unable to parse response. " + MakeUnexpectedResponse(resp, requestBody, responseBody), - } - - // Preserve status computation from htmlErrorParser in case of unknown error - statusParts := strings.SplitN(resp.Status, " ", 2) - if len(statusParts) < 2 { - apiErr.ErrorCode = "UNKNOWN" - } else { - apiErr.ErrorCode = strings.ReplaceAll(strings.ToUpper(strings.Trim(statusParts[1], " .")), " ", "_") - } - - return apiErr -} - -func MakeUnexpectedResponse(resp *http.Response, requestBody, responseBody []byte) string { - var req *http.Request - if resp != nil { - req = resp.Request - } - rts := httplog.RoundTripStringer{ - Request: req, - Response: resp, - RequestBody: requestBody, - ResponseBody: responseBody, - DebugHeaders: true, - DebugTruncateBytes: 10 * 1024, - DebugAuthorizationHeader: false, - } - return fmt.Sprintf("This is likely a bug in the Databricks SDK for Go or the underlying REST API. Please report this issue with the following debugging information to the SDK issue tracker at https://github.com/databricks/databricks-sdk-go/issues. 
Request log:\n```\n%s\n```", rts.String()) -} diff --git a/apierr/errors_test.go b/apierr/errors_test.go index 457882add..2dffa6b0a 100644 --- a/apierr/errors_test.go +++ b/apierr/errors_test.go @@ -156,11 +156,11 @@ func TestGetAPIError(t *testing.T) { wantErrorIs: ErrResourceDoesNotExist, }, { - name: "unexpected error", - resp: makeTestReponseWrapper(http.StatusInternalServerError, `unparsable error message`), + name: "unknown error message", + resp: makeTestReponseWrapper(http.StatusInternalServerError, "unknown error message"), want: &APIError{ - ErrorCode: "INTERNAL_SERVER_ERROR", - Message: "unable to parse response. This is likely a bug in the Databricks SDK for Go or the underlying REST API. Please report this issue with the following debugging information to the SDK issue tracker at https://github.com/databricks/databricks-sdk-go/issues. Request log:\n```\nGET /api/2.0/myservice\n> * Host: \n< 500 Internal Server Error\n< unparsable error message\n```", + ErrorCode: "UNKNOWN", // default error code + Message: "unknown error message", StatusCode: http.StatusInternalServerError, }, }, diff --git a/httpclient/response.go b/httpclient/response.go index 6b5448304..2a07fe2cd 100644 --- a/httpclient/response.go +++ b/httpclient/response.go @@ -11,9 +11,9 @@ import ( "strconv" "strings" - "github.com/databricks/databricks-sdk-go/apierr" "github.com/databricks/databricks-sdk-go/common" "github.com/databricks/databricks-sdk-go/logger" + "github.com/databricks/databricks-sdk-go/logger/httplog" ) func WithResponseHeader(key string, value *string) DoOption { @@ -92,7 +92,7 @@ func WithResponseUnmarshal(response any) DoOption { return nil } if err = json.Unmarshal(bodyBytes, &response); err != nil { - return fmt.Errorf("failed to unmarshal response body: %w. %s", err, apierr.MakeUnexpectedResponse(body.Response, body.RequestBody.DebugBytes, bodyBytes)) + return fmt.Errorf("failed to unmarshal response body: %w. 
%s", err, makeUnexpectedResponse(body.Response, body.RequestBody.DebugBytes, bodyBytes)) } return nil }, @@ -193,3 +193,20 @@ func parseHeaderTag(field reflect.StructField) headerTag { } return headerTag } + +func makeUnexpectedResponse(resp *http.Response, requestBody, responseBody []byte) string { + var req *http.Request + if resp != nil { + req = resp.Request + } + rts := httplog.RoundTripStringer{ + Request: req, + Response: resp, + RequestBody: requestBody, + ResponseBody: responseBody, + DebugHeaders: true, + DebugTruncateBytes: 10 * 1024, + DebugAuthorizationHeader: false, + } + return fmt.Sprintf("This is likely a bug in the Databricks SDK for Go or the underlying REST API. Please report this issue with the following debugging information to the SDK issue tracker at https://github.com/databricks/databricks-sdk-go/issues. Request log:\n```\n%s\n```", rts.String()) +} From 8bb47864074b5a221f44c9d4fbfd28997a549dfc Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Mon, 24 Mar 2025 10:37:22 +0100 Subject: [PATCH 33/54] [Feature] Implement U2M Authentication in the Go SDK (#1108) ## What changes are proposed in this pull request? This PR moves logic about U2M OAuth login from the CLI to the Go SDK. This eliminates a cyclic dependency between the SDK and CLI for interacting with the OAuth token cache and enables U2M support directly in the Go SDK without need for the CLI to be installed. Most of this code is carried over from the CLI, but I have made specific refactors to generalize it where needed. Currently, the token cache key follows a specific structure: - `https:///oidc/accounts/` for account-based sessions - `https://` for workspace-based sessions This can be generalized to allow callers to cache tokens in other manners. For example, users may want to cache tokens per principal or per set of OAuth scopes for their own OAuth applications. 
Additionally, products should be able to use an isolated token cache or a token cache backed by a different storage medium, like a database. This is simple to do by introducing a token cache interface. Implementers can define their own credential strategy and reuse the PersistentAuth type to handle the negotiation. ## How is this tested? Carried over tests from the CLI. More tests are forthcoming, once a decision is made on this approach. --- NEXT_CHANGELOG.md | 1 + config/auth_databricks_cli.go | 123 ------- config/auth_databricks_cli_test.go | 108 ------ config/auth_default.go | 2 +- config/auth_m2m.go | 33 +- config/auth_m2m_test.go | 5 +- config/auth_u2m.go | 146 ++++++++ config/auth_u2m_test.go | 141 ++++++++ config/config.go | 20 ++ config/config_test.go | 35 ++ config/credentials/credentials.go | 13 - config/in_memory_test.go | 27 ++ credentials/u2m/account_oauth_argument.go | 54 +++ credentials/u2m/cache/cache.go | 32 ++ credentials/u2m/cache/file.go | 179 ++++++++++ credentials/u2m/cache/file_test.go | 66 ++++ credentials/u2m/callback.go | 138 ++++++++ credentials/u2m/doc.go | 41 +++ credentials/u2m/endpoint_supplier.go | 60 ++++ credentials/u2m/endpoint_supplier_test.go | 37 ++ credentials/u2m/error.go | 8 + credentials/u2m/oauth_argument.go | 11 + credentials/u2m/page.tmpl | 104 ++++++ credentials/u2m/persistent_auth.go | 371 ++++++++++++++++++++ credentials/u2m/persistent_auth_test.go | 296 ++++++++++++++++ credentials/u2m/workspace_oauth_argument.go | 51 +++ go.mod | 3 +- go.sum | 3 + httpclient/fixtures/fixture.go | 2 + httpclient/oauth_token.go | 27 +- httpclient/request_test.go | 2 +- 31 files changed, 1853 insertions(+), 286 deletions(-) delete mode 100644 config/auth_databricks_cli.go delete mode 100644 config/auth_databricks_cli_test.go create mode 100644 config/auth_u2m.go create mode 100644 config/auth_u2m_test.go create mode 100644 config/in_memory_test.go create mode 100644 credentials/u2m/account_oauth_argument.go create mode 100644 
credentials/u2m/cache/cache.go create mode 100644 credentials/u2m/cache/file.go create mode 100644 credentials/u2m/cache/file_test.go create mode 100644 credentials/u2m/callback.go create mode 100644 credentials/u2m/doc.go create mode 100644 credentials/u2m/endpoint_supplier.go create mode 100644 credentials/u2m/endpoint_supplier_test.go create mode 100644 credentials/u2m/error.go create mode 100644 credentials/u2m/oauth_argument.go create mode 100644 credentials/u2m/page.tmpl create mode 100644 credentials/u2m/persistent_auth.go create mode 100644 credentials/u2m/persistent_auth_test.go create mode 100644 credentials/u2m/workspace_oauth_argument.go diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 36350246f..3d3adac6e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -4,6 +4,7 @@ ### New Features and Improvements +* Support user-to-machine authentication in the SDK ([#1108](https://github.com/databricks/databricks-sdk-go/pull/1108)). - Instances of `ApiClient` now share the same connection pool by default ([PR #1190](https://github.com/databricks/databricks-sdk-go/pull/1190)). 
### Bug Fixes diff --git a/config/auth_databricks_cli.go b/config/auth_databricks_cli.go deleted file mode 100644 index 1ec798307..000000000 --- a/config/auth_databricks_cli.go +++ /dev/null @@ -1,123 +0,0 @@ -package config - -import ( - "context" - "encoding/json" - "errors" - "fmt" - "os" - "os/exec" - "path/filepath" - "strings" - - "github.com/databricks/databricks-sdk-go/config/credentials" - "github.com/databricks/databricks-sdk-go/logger" - "golang.org/x/oauth2" -) - -type DatabricksCliCredentials struct { -} - -func (c DatabricksCliCredentials) Name() string { - return "databricks-cli" -} - -func (c DatabricksCliCredentials) Configure(ctx context.Context, cfg *Config) (credentials.CredentialsProvider, error) { - if cfg.Host == "" { - return nil, nil - } - - ts, err := newDatabricksCliTokenSource(ctx, cfg) - if err != nil { - if errors.Is(err, exec.ErrNotFound) { - logger.Debugf(ctx, "Most likely the Databricks CLI is not installed") - return nil, nil - } - if err == errLegacyDatabricksCli { - logger.Debugf(ctx, "Databricks CLI version <0.100.0 detected") - return nil, nil - } - return nil, err - } - - _, err = ts.Token() - if err != nil { - if strings.Contains(err.Error(), "no configuration file found at") { - // databricks auth token produced this error message between - // v0.207.1 and v0.209.1 - return nil, nil - } - if strings.Contains(err.Error(), "databricks OAuth is not") { - // OAuth is not configured or not supported - return nil, nil - } - return nil, err - } - logger.Debugf(ctx, "Using Databricks CLI authentication with Databricks OAuth tokens") - visitor := refreshableVisitor(ts) - return credentials.NewOAuthCredentialsProvider(visitor, ts.Token), nil -} - -var errLegacyDatabricksCli = errors.New("legacy Databricks CLI detected") - -type databricksCliTokenSource struct { - ctx context.Context - name string - args []string -} - -func newDatabricksCliTokenSource(ctx context.Context, cfg *Config) (*databricksCliTokenSource, error) { - args := 
[]string{"auth", "token", "--host", cfg.Host} - - if cfg.IsAccountClient() { - args = append(args, "--account-id", cfg.AccountID) - } - - databricksCliPath := cfg.DatabricksCliPath - if databricksCliPath == "" { - databricksCliPath = "databricks" - } - - // Resolve absolute path to the Databricks CLI executable. - path, err := exec.LookPath(databricksCliPath) - if err != nil { - return nil, err - } - - // Resolve symlinks in order to figure out executable size. - path, err = filepath.EvalSymlinks(path) - if err != nil { - return nil, err - } - - // Determine executable size as signal to determine old/new Databricks CLI. - stat, err := os.Stat(path) - if err != nil { - return nil, err - } - - // The new Databricks CLI is a single binary with size > 1MB. - // We use the size as a signal to determine which Databricks CLI is installed. - if stat.Size() < (1024 * 1024) { - return nil, errLegacyDatabricksCli - } - - return &databricksCliTokenSource{ctx: ctx, name: path, args: args}, nil -} - -func (ts *databricksCliTokenSource) Token() (*oauth2.Token, error) { - out, err := runCommand(ts.ctx, ts.name, ts.args) - if ee, ok := err.(*exec.ExitError); ok { - return nil, fmt.Errorf("cannot get access token: %s", string(ee.Stderr)) - } - if err != nil { - return nil, fmt.Errorf("cannot get access token: %v", err) - } - var t oauth2.Token - err = json.Unmarshal(out, &t) - if err != nil { - return nil, fmt.Errorf("cannot unmarshal Databricks CLI result: %w", err) - } - logger.Infof(context.Background(), "Refreshed OAuth token from Databricks CLI, expires on %s", t.Expiry) - return &t, nil -} diff --git a/config/auth_databricks_cli_test.go b/config/auth_databricks_cli_test.go deleted file mode 100644 index 5566d93c0..000000000 --- a/config/auth_databricks_cli_test.go +++ /dev/null @@ -1,108 +0,0 @@ -package config - -import ( - "context" - "os" - "path/filepath" - "testing" - - "github.com/databricks/databricks-sdk-go/internal/env" - "github.com/stretchr/testify/assert" - 
"github.com/stretchr/testify/require" -) - -var cliDummy = &Config{Host: "https://abc.cloud.databricks.com/"} - -func writeSmallDummyExecutable(t *testing.T, path string) { - f, err := os.Create(filepath.Join(path, "databricks")) - require.NoError(t, err) - defer f.Close() - err = os.Chmod(f.Name(), 0755) - require.NoError(t, err) - _, err = f.WriteString("#!/bin/sh\necho hello world\n") - require.NoError(t, err) -} - -func writeLargeDummyExecutable(t *testing.T, path string) { - f, err := os.Create(filepath.Join(path, "databricks")) - require.NoError(t, err) - defer f.Close() - err = os.Chmod(f.Name(), 0755) - require.NoError(t, err) - _, err = f.WriteString("#!/bin/sh\n") - require.NoError(t, err) - - f.WriteString(` -cat </oidc/accounts/". +func (a BasicAccountOAuthArgument) GetCacheKey() string { + return fmt.Sprintf("%s/oidc/accounts/%s", a.accountHost, a.accountID) +} diff --git a/credentials/u2m/cache/cache.go b/credentials/u2m/cache/cache.go new file mode 100644 index 000000000..6e4137211 --- /dev/null +++ b/credentials/u2m/cache/cache.go @@ -0,0 +1,32 @@ +/* +Package cache provides an interface for storing and looking up OAuth tokens. + +The cache should be primarily used for user-to-machine (U2M) OAuth flows. In U2M +OAuth flows, the application needs to store the token for later use, such as in +a separate process, and the cache provides a way to do so without requiring the +user to follow the OAuth flow again. + +In machine-to-machine (M2M) OAuth flows, the application is configured with a +secret and can fetch a new token on demand without user interaction, so the +token cache is not necessary. +*/ +package cache + +import ( + "errors" + + "golang.org/x/oauth2" +) + +// TokenCache is an interface for storing and looking up OAuth tokens. +type TokenCache interface { + // Store stores the token with the given key, replacing any existing token. + // If t is nil, it deletes the token. 
+ Store(key string, t *oauth2.Token) error + + // Lookup looks up the token with the given key. If the token is not found, it + // returns ErrNotConfigured. + Lookup(key string) (*oauth2.Token, error) +} + +var ErrNotConfigured = errors.New("databricks OAuth is not configured for this host") diff --git a/credentials/u2m/cache/file.go b/credentials/u2m/cache/file.go new file mode 100644 index 000000000..5652f4833 --- /dev/null +++ b/credentials/u2m/cache/file.go @@ -0,0 +1,179 @@ +package cache + +import ( + "encoding/json" + "fmt" + "os" + "path/filepath" + "sync" + + "golang.org/x/oauth2" +) + +const ( + // tokenCacheFile is the path of the default token cache, relative to the + // user's home directory. + tokenCacheFilePath = ".databricks/token-cache.json" + + // ownerExecReadWrite is the permission for the .databricks directory. + ownerExecReadWrite = 0o700 + + // ownerReadWrite is the permission for the token-cache.json file. + ownerReadWrite = 0o600 + + // tokenCacheVersion is the version of the token cache file format. + // + // Version 1 format: + // + // { + // "version": 1, + // "tokens": { + // "": { + // "access_token": "", + // "token_type": "", + // "refresh_token": "", + // "expiry": "" + // } + // } + // } + tokenCacheVersion = 1 +) + +// tokenCacheFile is the format of the token cache file. +type tokenCacheFile struct { + Version int `json:"version"` + Tokens map[string]*oauth2.Token `json:"tokens"` +} + +type FileTokenCacheOption func(*fileTokenCache) + +func WithFileLocation(fileLocation string) FileTokenCacheOption { + return func(c *fileTokenCache) { + c.fileLocation = fileLocation + } +} + +// fileTokenCache caches tokens in "~/.databricks/token-cache.json". fileTokenCache +// implements the TokenCache interface. +type fileTokenCache struct { + fileLocation string + + // locker protects the token cache file from concurrent reads and writes. + locker sync.Mutex +} + +// NewFileTokenCache creates a new FileTokenCache. 
By default, the cache is +// stored in "~/.databricks/token-cache.json". The cache file is created if it +// does not already exist. The cache file is created with owner permissions +// 0600 and the directory is created with owner permissions 0700. If the cache +// file is corrupt or if its version does not match tokenCacheVersion, an error +// is returned. +func NewFileTokenCache(opts ...FileTokenCacheOption) (TokenCache, error) { + c := &fileTokenCache{} + for _, opt := range opts { + opt(c) + } + if err := c.init(); err != nil { + return nil, err + } + // Fail fast if the cache is not working. + if _, err := c.load(); err != nil { + return nil, fmt.Errorf("load: %w", err) + } + return c, nil +} + +// Store implements the TokenCache interface. +func (c *fileTokenCache) Store(key string, t *oauth2.Token) error { + c.locker.Lock() + defer c.locker.Unlock() + f, err := c.load() + if err != nil { + return fmt.Errorf("load: %w", err) + } + if f.Tokens == nil { + f.Tokens = map[string]*oauth2.Token{} + } + if t == nil { + delete(f.Tokens, key) + } else { + f.Tokens[key] = t + } + raw, err := json.MarshalIndent(f, "", " ") + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + return os.WriteFile(c.fileLocation, raw, ownerReadWrite) +} + +// Lookup implements the TokenCache interface. +func (c *fileTokenCache) Lookup(key string) (*oauth2.Token, error) { + c.locker.Lock() + defer c.locker.Unlock() + f, err := c.load() + if err != nil { + return nil, fmt.Errorf("load: %w", err) + } + t, ok := f.Tokens[key] + if !ok { + return nil, ErrNotConfigured + } + return t, nil +} + +// init initializes the token cache file. It creates the file and directory if +// they do not already exist. 
+func (c *fileTokenCache) init() error { + // set the default file location + if c.fileLocation == "" { + home, err := os.UserHomeDir() + if err != nil { + return fmt.Errorf("failed loading home directory: %w", err) + } + c.fileLocation = filepath.Join(home, tokenCacheFilePath) + } + // Create the cache file if it does not exist. + if _, err := os.Stat(c.fileLocation); err != nil { + if !os.IsNotExist(err) { + return fmt.Errorf("stat file: %w", err) + } + // Create the parent directories if needed. + if err := os.MkdirAll(filepath.Dir(c.fileLocation), ownerExecReadWrite); err != nil { + return fmt.Errorf("mkdir: %w", err) + } + + // Create an empty cache file. + f := &tokenCacheFile{ + Version: tokenCacheVersion, + Tokens: map[string]*oauth2.Token{}, + } + raw, err := json.MarshalIndent(f, "", " ") + if err != nil { + return fmt.Errorf("marshal: %w", err) + } + if err := os.WriteFile(c.fileLocation, raw, ownerReadWrite); err != nil { + return fmt.Errorf("write: %w", err) + } + } + return nil +} + +// load loads the token cache file from disk. If the file is corrupt or if its +// version does not match tokenCacheVersion, it returns an error. +func (c *fileTokenCache) load() (*tokenCacheFile, error) { + raw, err := os.ReadFile(c.fileLocation) + if err != nil { + return nil, fmt.Errorf("read: %w", err) + } + f := &tokenCacheFile{} + if err := json.Unmarshal(raw, &f); err != nil { + return nil, fmt.Errorf("parse: %w", err) + } + if f.Version != tokenCacheVersion { + // in the later iterations we could do state upgraders, + // so that we transform token cache from v1 to v2 without + // losing the tokens and asking the user to re-authenticate. 
+ return nil, fmt.Errorf("needs version %d, got version %d", tokenCacheVersion, f.Version) + } + return f, nil +} diff --git a/credentials/u2m/cache/file_test.go b/credentials/u2m/cache/file_test.go new file mode 100644 index 000000000..ef45820b5 --- /dev/null +++ b/credentials/u2m/cache/file_test.go @@ -0,0 +1,66 @@ +package cache + +import ( + "os" + "path/filepath" + "testing" + + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/oauth2" +) + +func setup(t *testing.T) string { + tempHomeDir := t.TempDir() + return filepath.Join(tempHomeDir, "token-cache.json") +} + +func TestStoreAndLookup(t *testing.T) { + c, err := NewFileTokenCache(WithFileLocation(setup(t))) + require.NoError(t, err) + err = c.Store("x", &oauth2.Token{ + AccessToken: "abc", + }) + require.NoError(t, err) + + err = c.Store("y", &oauth2.Token{ + AccessToken: "bcd", + }) + require.NoError(t, err) + + tok, err := c.Lookup("x") + require.NoError(t, err) + assert.Equal(t, "abc", tok.AccessToken) + + _, err = c.Lookup("z") + assert.Equal(t, ErrNotConfigured, err) +} + +func TestNoCacheFileReturnsErrNotConfigured(t *testing.T) { + l, err := NewFileTokenCache(WithFileLocation(setup(t))) + require.NoError(t, err) + _, err = l.Lookup("x") + assert.Equal(t, ErrNotConfigured, err) +} + +func TestLoadCorruptFile(t *testing.T) { + f := setup(t) + err := os.MkdirAll(filepath.Dir(f), ownerExecReadWrite) + require.NoError(t, err) + err = os.WriteFile(f, []byte("abc"), ownerExecReadWrite) + require.NoError(t, err) + + _, err = NewFileTokenCache(WithFileLocation(f)) + assert.EqualError(t, err, "load: parse: invalid character 'a' looking for beginning of value") +} + +func TestLoadWrongVersion(t *testing.T) { + f := setup(t) + err := os.MkdirAll(filepath.Dir(f), ownerExecReadWrite) + require.NoError(t, err) + err = os.WriteFile(f, []byte(`{"version": 823, "things": []}`), ownerExecReadWrite) + require.NoError(t, err) + + _, err = NewFileTokenCache(WithFileLocation(f)) + 
assert.EqualError(t, err, "load: needs version 1, got version 823") +} diff --git a/credentials/u2m/callback.go b/credentials/u2m/callback.go new file mode 100644 index 000000000..aab576a04 --- /dev/null +++ b/credentials/u2m/callback.go @@ -0,0 +1,138 @@ +package u2m + +import ( + "context" + _ "embed" + "fmt" + "html/template" + "net/http" + "strings" + + "golang.org/x/text/cases" + "golang.org/x/text/language" +) + +//go:embed page.tmpl +var pageTmpl string + +type oauthResult struct { + Error string + ErrorDescription string + State string + Code string + Host string +} + +// callbackServer is a server that listens for the redirect from the Databricks +// identity provider. It renders a page.html template that shows the result of +// the authentication attempt. +type callbackServer struct { + // ctx is the context used when waiting for the redirect from the identity + // provider. This is needed because the Handler() method from the oauth2 + // library does not accept a context. + ctx context.Context + + // srv is the server that listens for the redirect from the identity provider. + srv http.Server + + // browser is a function that opens a browser to the given URL. + browser func(string) error + + // arg is the OAuth argument used to authenticate. + arg OAuthArgument + + // renderErrCh is a channel that receives an error if there is an error + // rendering the page.html template. + renderErrCh chan error + + // feedbackCh is a channel that receives the result of the authentication + // attempt. + feedbackCh chan oauthResult + + // tmpl is the template used to render the response page after the user is + // redirected back to the callback server. + tmpl *template.Template +} + +// newCallbackServer creates a new callback server that listens for the redirect +// from the Databricks identity provider. 
+func (a *PersistentAuth) newCallbackServer() (*callbackServer, error) { + tmpl, err := template.New("page").Funcs(template.FuncMap{ + "title": func(in string) string { + title := cases.Title(language.English) + return title.String(strings.ReplaceAll(in, "_", " ")) + }, + }).Parse(pageTmpl) + if err != nil { + return nil, err + } + cb := &callbackServer{ + feedbackCh: make(chan oauthResult), + renderErrCh: make(chan error), + tmpl: tmpl, + ctx: a.ctx, + browser: a.browser, + arg: a.oAuthArgument, + } + cb.srv.Handler = cb + go func() { + _ = cb.srv.Serve(a.ln) + }() + return cb, nil +} + +// Close closes the callback server. +func (cb *callbackServer) Close() error { + return cb.srv.Close() +} + +// ServeHTTP renders the page.html template. +func (cb *callbackServer) ServeHTTP(w http.ResponseWriter, r *http.Request) { + res := oauthResult{ + Error: r.FormValue("error"), + ErrorDescription: r.FormValue("error_description"), + Code: r.FormValue("code"), + State: r.FormValue("state"), + Host: cb.getHost(), + } + if res.Error != "" { + w.WriteHeader(http.StatusBadRequest) + } else { + w.WriteHeader(http.StatusOK) + } + err := cb.tmpl.Execute(w, res) + if err != nil { + cb.renderErrCh <- err + } + cb.feedbackCh <- res +} + +func (cb *callbackServer) getHost() string { + switch a := cb.arg.(type) { + case AccountOAuthArgument: + return a.GetAccountHost() + case WorkspaceOAuthArgument: + return a.GetWorkspaceHost() + default: + return "" + } +} + +// Handler opens up a browser waits for redirect to come back from the identity provider +func (cb *callbackServer) Handler(authCodeURL string) (string, string, error) { + err := cb.browser(authCodeURL) + if err != nil { + fmt.Printf("Please open %s in the browser to continue authentication", authCodeURL) + } + select { + case <-cb.ctx.Done(): + return "", "", cb.ctx.Err() + case renderErr := <-cb.renderErrCh: + return "", "", renderErr + case res := <-cb.feedbackCh: + if res.Error != "" { + return "", "", fmt.Errorf("%s: %s", 
res.Error, res.ErrorDescription) + } + return res.Code, res.State, nil + } +} diff --git a/credentials/u2m/doc.go b/credentials/u2m/doc.go new file mode 100644 index 000000000..85835347f --- /dev/null +++ b/credentials/u2m/doc.go @@ -0,0 +1,41 @@ +/* +Package u2m supports the user-to-machine (U2M) OAuth flow for authenticating with Databricks. + +Databricks uses the authorization code flow from OAuth 2.0 to authenticate users. This flow +consists of four steps: + 1. Retrieve an authorization code for a user by opening a browser and directing them to the + Databricks authorization URL. + 2. Exchange the authorization code for an access token. + 3. Use the access token to authenticate with Databricks. + 4. When the access token expires, use the refresh token to get a new access token. + +The token and authorization endpoints for Databricks vary depending on whether the host is +an account- or workspace-level host. Account-level endpoints are fixed based on the account +ID and host, while workspace-level endpoints are discovered using the OIDC discovery endpoint +at /oidc/.well-known/oauth-authorization-server. + +To trigger the authorization flow, construct a PersistentAuth object and call the +Challenge() method: + + auth, err := oauth.NewPersistentAuth(ctx) + if err != nil { + log.Fatalf("failed to create persistent auth: %v", err) + } + token, err := auth.Challenge(ctx, oauth.BasicAccountOAuthArgument{ + AccountHost: "https://accounts.cloud.databricks.com", + AccountID: "xyz", + }) + +Because the U2M flow requires user interaction, the resulting access token and refresh token +can be stored in a persistent cache to avoid prompting the user for credentials on every +authentication attempt. By default, the cache is stored in ~/.databricks/token-cache.json. 
+Retrieve the cached token by calling the Load() method: + + token, err := auth.Load(ctx, oauth.BasicAccountOAuthArgument{ + AccountHost: "https://accounts.cloud.databricks.com", + AccountID: "xyz", + }) + +See the cache package for more information on customizing the cache. +*/ +package u2m diff --git a/credentials/u2m/endpoint_supplier.go b/credentials/u2m/endpoint_supplier.go new file mode 100644 index 000000000..fb5c48c24 --- /dev/null +++ b/credentials/u2m/endpoint_supplier.go @@ -0,0 +1,60 @@ +package u2m + +import ( + "context" + "errors" + "fmt" + + "github.com/databricks/databricks-sdk-go/httpclient" +) + +// OAuthEndpointSupplier provides the http functionality needed for interacting with the +// Databricks OAuth APIs. +type OAuthEndpointSupplier interface { + // GetWorkspaceOAuthEndpoints returns the OAuth2 endpoints for the workspace. + GetWorkspaceOAuthEndpoints(ctx context.Context, workspaceHost string) (*OAuthAuthorizationServer, error) + + // GetAccountOAuthEndpoints returns the OAuth2 endpoints for the account. + GetAccountOAuthEndpoints(ctx context.Context, accountHost string, accountId string) (*OAuthAuthorizationServer, error) +} + +// BasicOAuthEndpointSupplier is an implementation of the OAuthEndpointSupplier interface. +type BasicOAuthEndpointSupplier struct { + // Client is the ApiClient to use for making HTTP requests. + Client *httpclient.ApiClient +} + +// GetWorkspaceOAuthEndpoints returns the OAuth endpoints for the given workspace. +// It queries the OIDC discovery endpoint to get the OAuth endpoints using the +// provided ApiClient. 
+func (c *BasicOAuthEndpointSupplier) GetWorkspaceOAuthEndpoints(ctx context.Context, workspaceHost string) (*OAuthAuthorizationServer, error) { + oidc := fmt.Sprintf("%s/oidc/.well-known/oauth-authorization-server", workspaceHost) + var oauthEndpoints OAuthAuthorizationServer + if err := c.Client.Do(ctx, "GET", oidc, httpclient.WithResponseUnmarshal(&oauthEndpoints)); err != nil { + return nil, ErrOAuthNotSupported + } + return &oauthEndpoints, nil +} + +// GetAccountOAuthEndpoints returns the OAuth2 endpoints for the account. The +// account-level OAuth endpoints are fixed based on the account ID and host. +func (c *BasicOAuthEndpointSupplier) GetAccountOAuthEndpoints(ctx context.Context, accountHost string, accountId string) (*OAuthAuthorizationServer, error) { + return &OAuthAuthorizationServer{ + AuthorizationEndpoint: fmt.Sprintf("%s/oidc/accounts/%s/v1/authorize", accountHost, accountId), + TokenEndpoint: fmt.Sprintf("%s/oidc/accounts/%s/v1/token", accountHost, accountId), + }, nil +} + +var ErrOAuthNotSupported = errors.New("databricks OAuth is not supported for this host") + +// OAuthAuthorizationServer contains the OAuth endpoints for a Databricks account +// or workspace. +type OAuthAuthorizationServer struct { + // AuthorizationEndpoint is the URL to redirect users to for authorization. + // It typically ends with /v1/authorize. + AuthorizationEndpoint string `json:"authorization_endpoint"` + + // TokenEndpoint is the URL to exchange an authorization code for an access token. + // It typically ends with /v1/token.
+ TokenEndpoint string `json:"token_endpoint"` +} diff --git a/credentials/u2m/endpoint_supplier_test.go b/credentials/u2m/endpoint_supplier_test.go new file mode 100644 index 000000000..72106e91b --- /dev/null +++ b/credentials/u2m/endpoint_supplier_test.go @@ -0,0 +1,37 @@ +package u2m + +import ( + "context" + "testing" + + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" + "github.com/stretchr/testify/assert" +) + +func TestBasicOAuthClient_GetAccountOAuthEndpoints(t *testing.T) { + c := &BasicOAuthEndpointSupplier{} + s, err := c.GetAccountOAuthEndpoints(context.Background(), "https://abc", "xyz") + assert.NoError(t, err) + assert.Equal(t, "https://abc/oidc/accounts/xyz/v1/authorize", s.AuthorizationEndpoint) + assert.Equal(t, "https://abc/oidc/accounts/xyz/v1/token", s.TokenEndpoint) +} + +func TestGetWorkspaceOAuthEndpoints(t *testing.T) { + p := httpclient.NewApiClient(httpclient.ClientConfig{ + Transport: fixtures.MappingTransport{ + "GET /oidc/.well-known/oauth-authorization-server": { + Status: 200, + Response: map[string]string{ + "authorization_endpoint": "a", + "token_endpoint": "b", + }, + }, + }, + }) + c := &BasicOAuthEndpointSupplier{Client: p} + endpoints, err := c.GetWorkspaceOAuthEndpoints(context.Background(), "https://abc") + assert.NoError(t, err) + assert.Equal(t, "a", endpoints.AuthorizationEndpoint) + assert.Equal(t, "b", endpoints.TokenEndpoint) +} diff --git a/credentials/u2m/error.go b/credentials/u2m/error.go new file mode 100644 index 000000000..be953b0c1 --- /dev/null +++ b/credentials/u2m/error.go @@ -0,0 +1,8 @@ +package u2m + +// InvalidRefreshTokenError is returned from PersistentAuth's Load() method +// if the access token has expired and the refresh token in the token cache +// is invalid. 
+type InvalidRefreshTokenError struct { + error +} diff --git a/credentials/u2m/oauth_argument.go b/credentials/u2m/oauth_argument.go new file mode 100644 index 000000000..f2d2ebc5d --- /dev/null +++ b/credentials/u2m/oauth_argument.go @@ -0,0 +1,11 @@ +package u2m + +// OAuthArgument is an interface that provides the necessary information to +// authenticate with PersistentAuth. Implementations of this interface must +// implement either the WorkspaceOAuthArgument or AccountOAuthArgument +// interface. +type OAuthArgument interface { + // GetCacheKey returns a unique key for the OAuthArgument. This key is used + // to store and retrieve the token from the token cache. + GetCacheKey() string +} diff --git a/credentials/u2m/page.tmpl b/credentials/u2m/page.tmpl new file mode 100644 index 000000000..1540222db --- /dev/null +++ b/credentials/u2m/page.tmpl @@ -0,0 +1,104 @@ + + + + + {{if .Error }}{{ .Error | title }}{{ else }}Success{{end}} + + + + + + + +
+
+ + +
{{ .Error | title }}
+
{{ .ErrorDescription }}
+ +
Authenticated
+ {{- if .Host }} +
Go to {{.Host}}
+ {{- end}} + +
+ You can close this tab. Or go to documentation +
+
+
+ + diff --git a/credentials/u2m/persistent_auth.go b/credentials/u2m/persistent_auth.go new file mode 100644 index 000000000..7e28a25f5 --- /dev/null +++ b/credentials/u2m/persistent_auth.go @@ -0,0 +1,371 @@ +package u2m + +import ( + "context" + "crypto/rand" + "crypto/sha256" + "encoding/base64" + "encoding/json" + "errors" + "fmt" + "net" + "net/http" + "time" + + cache "github.com/databricks/databricks-sdk-go/credentials/u2m/cache" + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/logger" + "github.com/databricks/databricks-sdk-go/retries" + "github.com/pkg/browser" + "golang.org/x/oauth2" + "golang.org/x/oauth2/authhandler" +) + +const ( + // appClientId is the default client ID used by the SDK for U2M OAuth. + appClientID = "databricks-cli" + + // appRedirectAddr is the default address for the OAuth2 callback server. + appRedirectAddr = "localhost:8020" + + // listenerTimeout is the maximum amount of time to acquire listener on + // appRedirectAddr. + listenerTimeout = 45 * time.Second +) + +// PersistentAuth is an OAuth manager that handles the U2M OAuth flow. Tokens +// are stored in and looked up from the provided cache. Tokens include the +// refresh token. On load, if the access token is expired, it is refreshed +// using the refresh token. +// +// The PersistentAuth is safe for concurrent use. The token cache is locked +// during token retrieval, refresh and storage. +type PersistentAuth struct { + // cache is the token cache to store and lookup tokens. + cache cache.TokenCache + // client is the HTTP client to use for OAuth2 requests. + client *http.Client + // endpointSupplier is the HTTP endpointSupplier to use for OAuth2 requests. + endpointSupplier OAuthEndpointSupplier + // oAuthArgument defines the workspace or account to authenticate to and the + // cache key for the token. + oAuthArgument OAuthArgument + // browser is the function to open a URL in the default browser. 
+ browser func(url string) error + // ln is the listener for the OAuth2 callback server. + ln net.Listener + // ctx is the context to use for underlying operations. This is needed in + // order to implement the oauth2.TokenSource interface. + ctx context.Context +} + +type PersistentAuthOption func(*PersistentAuth) + +// WithTokenCache sets the token cache for the PersistentAuth. +func WithTokenCache(c cache.TokenCache) PersistentAuthOption { + return func(a *PersistentAuth) { + a.cache = c + } +} + +// WithHttpClient sets the HTTP client for the PersistentAuth. +func WithHttpClient(c *http.Client) PersistentAuthOption { + return func(a *PersistentAuth) { + a.client = c + } +} + +// WithOAuthEndpointSupplier sets the OAuth endpoint supplier for the +// PersistentAuth. +func WithOAuthEndpointSupplier(c OAuthEndpointSupplier) PersistentAuthOption { + return func(a *PersistentAuth) { + a.endpointSupplier = c + } +} + +// WithOAuthArgument sets the OAuthArgument for the PersistentAuth. +func WithOAuthArgument(arg OAuthArgument) PersistentAuthOption { + return func(a *PersistentAuth) { + a.oAuthArgument = arg + } +} + +// WithBrowser sets the browser function for the PersistentAuth. +func WithBrowser(b func(url string) error) PersistentAuthOption { + return func(a *PersistentAuth) { + a.browser = b + } +} + +// NewPersistentAuth creates a new PersistentAuth with the provided options. +func NewPersistentAuth(ctx context.Context, opts ...PersistentAuthOption) (*PersistentAuth, error) { + p := &PersistentAuth{} + for _, opt := range opts { + opt(p) + } + // By default, PersistentAuth uses the default ApiClient to make HTTP + // requests. Furthermore, if the endpointSupplier is not provided, it uses + // this same client to fetch the OAuth endpoints. If the HTTP client is + // provided but the endpointSupplier is not, we construct a default + // ApiClient for use with BasicOAuthClient. 
+ apiClient := httpclient.NewApiClient(httpclient.ClientConfig{}) + if p.client == nil { + p.client = &http.Client{ + Transport: apiClient, + // 30 seconds matches the default timeout of the ApiClient + Timeout: 30 * time.Second, + } + } + if p.endpointSupplier == nil { + p.endpointSupplier = &BasicOAuthEndpointSupplier{ + Client: apiClient, + } + } + if p.cache == nil { + var err error + p.cache, err = cache.NewFileTokenCache() + if err != nil { + return nil, fmt.Errorf("cache: %w", err) + } + } + if p.oAuthArgument == nil { + return nil, errors.New("missing OAuthArgument") + } + if err := p.validateArg(); err != nil { + return nil, err + } + if p.browser == nil { + p.browser = browser.OpenURL + } + p.ctx = ctx + return p, nil +} + +// Token loads the OAuth2 token for the given OAuthArgument from the cache. If +// the token is expired, it is refreshed using the refresh token. +func (a *PersistentAuth) Token() (t *oauth2.Token, err error) { + err = a.startListener(a.ctx) + if err != nil { + return nil, fmt.Errorf("starting listener: %w", err) + } + defer a.Close() + + key := a.oAuthArgument.GetCacheKey() + t, err = a.cache.Lookup(key) + if err != nil { + return nil, fmt.Errorf("cache: %w", err) + } + // refresh if invalid + if !t.Valid() { + t, err = a.refresh(t) + if err != nil { + return nil, fmt.Errorf("token refresh: %w", err) + } + } + // do not print refresh token to end-user + t.RefreshToken = "" + return t, nil +} + +// refresh refreshes the token for the given OAuthArgument, storing the new +// token in the cache. 
+func (a *PersistentAuth) refresh(oldToken *oauth2.Token) (*oauth2.Token, error) { + // OAuth2 config is invoked only for expired tokens to speed up + // the happy path in the token retrieval + cfg, err := a.oauth2Config() + if err != nil { + return nil, err + } + // make OAuth2 library use our client + ctx := a.setOAuthContext(a.ctx) + // eagerly refresh token + t, err := cfg.TokenSource(ctx, oldToken).Token() + if err != nil { + // The default RoundTripper of our httpclient.ApiClient returns an error + // if the response status code is not 2xx. This isn't compliant with the + // RoundTripper interface, so this error isn't handled by the oauth2 + // library. We need to handle it here. + var internalHttpError *httpclient.HttpError + if errors.As(err, &internalHttpError) { + // error fields + // https://datatracker.ietf.org/doc/html/rfc6749#section-5.2 + var errResponse struct { + Error string `json:"error"` + ErrorDescription string `json:"error_description"` + } + if unmarshalErr := json.Unmarshal([]byte(internalHttpError.Message), &errResponse); unmarshalErr != nil { + return nil, fmt.Errorf("unmarshal: %w", unmarshalErr) + } + // Invalid refresh tokens get their own error type so they can be + // better presented to users. + if errResponse.ErrorDescription == "Refresh token is invalid" { + return nil, &InvalidRefreshTokenError{err} + } + return nil, fmt.Errorf("%s (error code: %s)", errResponse.ErrorDescription, errResponse.Error) + } + + // Handle responses from well-behaved *http.Client implementations. + var httpErr *oauth2.RetrieveError + if errors.As(err, &httpErr) { + // Invalid refresh tokens get their own error type so they can be + // better presented to users. 
+ if httpErr.ErrorDescription == "Refresh token is invalid" { + return nil, &InvalidRefreshTokenError{err} + } + return nil, fmt.Errorf("%s (error code: %s)", httpErr.ErrorDescription, httpErr.ErrorCode) + } + return nil, err + } + err = a.cache.Store(a.oAuthArgument.GetCacheKey(), t) + if err != nil { + return nil, fmt.Errorf("cache update: %w", err) + } + return t, nil +} + +// Challenge initiates the OAuth2 login flow for the given OAuthArgument. The +// OAuth2 flow is started by opening the browser to the OAuth2 authorization +// URL. The user is redirected to the callback server on appRedirectAddr. The +// callback server listens for the redirect from the identity provider and +// exchanges the authorization code for an access token. +func (a *PersistentAuth) Challenge() error { + err := a.startListener(a.ctx) + if err != nil { + return fmt.Errorf("starting listener: %w", err) + } + // The listener will be closed by the callback server automatically, but if + // the callback server is not created, we need to close the listener manually. + defer a.Close() + + cfg, err := a.oauth2Config() + if err != nil { + return fmt.Errorf("fetching oauth config: %w", err) + } + cb, err := a.newCallbackServer() + if err != nil { + return fmt.Errorf("callback server: %w", err) + } + defer cb.Close() + + state, pkce, err := a.stateAndPKCE() + if err != nil { + return fmt.Errorf("state and pkce: %w", err) + } + // make OAuth2 library use our client + ctx := a.setOAuthContext(a.ctx) + ts := authhandler.TokenSourceWithPKCE(ctx, cfg, state, cb.Handler, pkce) + t, err := ts.Token() + if err != nil { + return fmt.Errorf("authorize: %w", err) + } + // cache token identified by host (and possibly the account id) + err = a.cache.Store(a.oAuthArgument.GetCacheKey(), t) + if err != nil { + return fmt.Errorf("store: %w", err) + } + return nil +} + +// startListener starts a listener on appRedirectAddr, retrying if the address +// is already in use. 
+func (a *PersistentAuth) startListener(ctx context.Context) error { + listener, err := retries.Poll(ctx, listenerTimeout, + func() (*net.Listener, *retries.Err) { + var lc net.ListenConfig + l, err := lc.Listen(ctx, "tcp", appRedirectAddr) + if err != nil { + logger.Debugf(ctx, "failed to listen on %s: %v, retrying", appRedirectAddr, err) + return nil, retries.Continue(err) + } + return &l, nil + }) + if err != nil { + return fmt.Errorf("listener: %w", err) + } + a.ln = *listener + return nil +} + +func (a *PersistentAuth) Close() error { + if a.ln == nil { + return nil + } + return a.ln.Close() +} + +// validateArg ensures that the OAuthArgument is either a WorkspaceOAuthArgument +// or an AccountOAuthArgument. +func (a *PersistentAuth) validateArg() error { + _, isWorkspaceArg := a.oAuthArgument.(WorkspaceOAuthArgument) + _, isAccountArg := a.oAuthArgument.(AccountOAuthArgument) + if !isWorkspaceArg && !isAccountArg { + return fmt.Errorf("unsupported OAuthArgument type: %T, must implement either WorkspaceOAuthArgument or AccountOAuthArgument interface", a.oAuthArgument) + } + return nil +} + +// oauth2Config returns the OAuth2 configuration for the given OAuthArgument. 
+func (a *PersistentAuth) oauth2Config() (*oauth2.Config, error) { + scopes := []string{ + "offline_access", // ensures OAuth token includes refresh token + "all-apis", // ensures OAuth token has access to all control-plane APIs + } + var endpoints *OAuthAuthorizationServer + var err error + switch argg := a.oAuthArgument.(type) { + case WorkspaceOAuthArgument: + endpoints, err = a.endpointSupplier.GetWorkspaceOAuthEndpoints(a.ctx, argg.GetWorkspaceHost()) + case AccountOAuthArgument: + endpoints, err = a.endpointSupplier.GetAccountOAuthEndpoints( + a.ctx, argg.GetAccountHost(), argg.GetAccountId()) + default: + return nil, fmt.Errorf("unsupported OAuthArgument type: %T, must implement either WorkspaceOAuthArgument or AccountOAuthArgument interface", a.oAuthArgument) + } + if err != nil { + return nil, fmt.Errorf("fetching OAuth endpoints: %w", err) + } + return &oauth2.Config{ + ClientID: appClientID, + Endpoint: oauth2.Endpoint{ + AuthURL: endpoints.AuthorizationEndpoint, + TokenURL: endpoints.TokenEndpoint, + AuthStyle: oauth2.AuthStyleInParams, + }, + RedirectURL: fmt.Sprintf("http://%s", appRedirectAddr), + Scopes: scopes, + }, nil +} + +func (a *PersistentAuth) stateAndPKCE() (string, *authhandler.PKCEParams, error) { + verifier, err := a.randomString(64) + if err != nil { + return "", nil, fmt.Errorf("verifier: %w", err) + } + verifierSha256 := sha256.Sum256([]byte(verifier)) + challenge := base64.RawURLEncoding.EncodeToString(verifierSha256[:]) + state, err := a.randomString(16) + if err != nil { + return "", nil, fmt.Errorf("state: %w", err) + } + return state, &authhandler.PKCEParams{ + Challenge: challenge, + ChallengeMethod: "S256", + Verifier: verifier, + }, nil +} + +func (a *PersistentAuth) randomString(size int) (string, error) { + raw := make([]byte, size) + // ignore error as rand.Reader never returns an error + _, err := rand.Read(raw) + if err != nil { + return "", fmt.Errorf("rand.Read: %w", err) + } + return 
base64.RawURLEncoding.EncodeToString(raw), nil +} + +func (a *PersistentAuth) setOAuthContext(ctx context.Context) context.Context { + return context.WithValue(ctx, oauth2.HTTPClient, a.client) +} + +var _ oauth2.TokenSource = (*PersistentAuth)(nil) diff --git a/credentials/u2m/persistent_auth_test.go b/credentials/u2m/persistent_auth_test.go new file mode 100644 index 000000000..5aac4ecda --- /dev/null +++ b/credentials/u2m/persistent_auth_test.go @@ -0,0 +1,296 @@ +package u2m_test + +import ( + "context" + "errors" + "fmt" + "net/http" + "net/url" + "testing" + "time" + + "github.com/databricks/databricks-sdk-go/credentials/u2m" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" + "github.com/stretchr/testify/assert" + "github.com/stretchr/testify/require" + "golang.org/x/oauth2" +) + +type tokenCacheMock struct { + store func(key string, t *oauth2.Token) error + lookup func(key string) (*oauth2.Token, error) +} + +func (m *tokenCacheMock) Store(key string, t *oauth2.Token) error { + if m.store == nil { + panic("no store mock") + } + return m.store(key, t) +} + +func (m *tokenCacheMock) Lookup(key string) (*oauth2.Token, error) { + if m.lookup == nil { + panic("no lookup mock") + } + return m.lookup(key) +} + +func TestToken(t *testing.T) { + cache := &tokenCacheMock{ + lookup: func(key string) (*oauth2.Token, error) { + assert.Equal(t, "https://abc/oidc/accounts/xyz", key) + return &oauth2.Token{ + AccessToken: "bcd", + Expiry: time.Now().Add(1 * time.Minute), + }, nil + }, + } + arg, err := u2m.NewBasicAccountOAuthArgument("https://abc", "xyz") + assert.NoError(t, err) + p, err := u2m.NewPersistentAuth(context.Background(), u2m.WithTokenCache(cache), u2m.WithOAuthArgument(arg)) + require.NoError(t, err) + defer p.Close() + tok, err := p.Token() + assert.NoError(t, err) + assert.Equal(t, "bcd", tok.AccessToken) + assert.Equal(t, "", tok.RefreshToken) +} + +type MockOAuthEndpointSupplier struct{} + +func (m MockOAuthEndpointSupplier) 
GetAccountOAuthEndpoints(ctx context.Context, accountHost string, accountId string) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + AuthorizationEndpoint: fmt.Sprintf("%s/oidc/accounts/%s/v1/authorize", accountHost, accountId), + TokenEndpoint: fmt.Sprintf("%s/oidc/accounts/%s/v1/token", accountHost, accountId), + }, nil +} + +func (m MockOAuthEndpointSupplier) GetWorkspaceOAuthEndpoints(ctx context.Context, workspaceHost string) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + AuthorizationEndpoint: fmt.Sprintf("%s/oidc/v1/authorize", workspaceHost), + TokenEndpoint: fmt.Sprintf("%s/oidc/v1/token", workspaceHost), + }, nil +} + +func TestToken_RefreshesExpiredAccessToken(t *testing.T) { + ctx := context.Background() + expectedKey := "https://accounts.cloud.databricks.com/oidc/accounts/xyz" + cache := &tokenCacheMock{ + lookup: func(key string) (*oauth2.Token, error) { + assert.Equal(t, expectedKey, key) + return &oauth2.Token{ + AccessToken: "expired", + RefreshToken: "cde", + Expiry: time.Now().Add(-1 * time.Minute), + }, nil + }, + store: func(key string, tok *oauth2.Token) error { + assert.Equal(t, expectedKey, key) + assert.Equal(t, "def", tok.RefreshToken) + return nil + }, + } + arg, err := u2m.NewBasicAccountOAuthArgument("https://accounts.cloud.databricks.com", "xyz") + assert.NoError(t, err) + p, err := u2m.NewPersistentAuth( + ctx, + u2m.WithTokenCache(cache), + u2m.WithHttpClient(&http.Client{ + Transport: fixtures.SliceTransport{ + { + Method: "POST", + Resource: "/oidc/accounts/xyz/v1/token", + Response: `access_token=refreshed&refresh_token=def`, + ResponseHeaders: map[string][]string{ + "Content-Type": {"application/x-www-form-urlencoded"}, + }, + }, + }, + }), + u2m.WithOAuthEndpointSupplier(MockOAuthEndpointSupplier{}), + u2m.WithOAuthArgument(arg), + ) + require.NoError(t, err) + defer p.Close() + tok, err := p.Token() + assert.NoError(t, err) + assert.Equal(t, "refreshed", 
tok.AccessToken) + assert.Equal(t, "", tok.RefreshToken) +} + +func TestToken_ReturnsError(t *testing.T) { + ctx := context.Background() + cache := &tokenCacheMock{ + lookup: func(key string) (*oauth2.Token, error) { + assert.Equal(t, "https://accounts.cloud.databricks.com/oidc/accounts/xyz", key) + return &oauth2.Token{ + AccessToken: "expired", + RefreshToken: "cde", + Expiry: time.Now().Add(-1 * time.Minute), + }, nil + }, + } + arg, err := u2m.NewBasicAccountOAuthArgument("https://accounts.cloud.databricks.com", "xyz") + assert.NoError(t, err) + p, err := u2m.NewPersistentAuth( + ctx, + u2m.WithTokenCache(cache), + u2m.WithHttpClient(&http.Client{ + Transport: fixtures.SliceTransport{ + { + Method: "POST", + Resource: "/oidc/accounts/xyz/v1/token", + Response: `{"error": "invalid_grant", "error_description": "Invalid Client"}`, + Status: 401, + }, + }, + }), + u2m.WithOAuthEndpointSupplier(MockOAuthEndpointSupplier{}), + u2m.WithOAuthArgument(arg), + ) + require.NoError(t, err) + defer p.Close() + tok, err := p.Token() + assert.Nil(t, tok) + assert.ErrorContains(t, err, "Invalid Client (error code: invalid_grant)") +} + +func TestToken_ReturnsInvalidRefreshTokenError(t *testing.T) { + ctx := context.Background() + cache := &tokenCacheMock{ + lookup: func(key string) (*oauth2.Token, error) { + assert.Equal(t, "https://accounts.cloud.databricks.com/oidc/accounts/xyz", key) + return &oauth2.Token{ + AccessToken: "expired", + RefreshToken: "cde", + Expiry: time.Now().Add(-1 * time.Minute), + }, nil + }, + } + arg, err := u2m.NewBasicAccountOAuthArgument("https://accounts.cloud.databricks.com", "xyz") + assert.NoError(t, err) + p, err := u2m.NewPersistentAuth( + ctx, + u2m.WithTokenCache(cache), + u2m.WithHttpClient(&http.Client{ + Transport: fixtures.SliceTransport{ + { + Method: "POST", + Resource: "/oidc/accounts/xyz/v1/token", + Response: `{"error": "invalid_grant", "error_description": "Refresh token is invalid"}`, + Status: 401, + }, + }, + }), + 
u2m.WithOAuthEndpointSupplier(MockOAuthEndpointSupplier{}), + u2m.WithOAuthArgument(arg), + ) + require.NoError(t, err) + defer p.Close() + tok, err := p.Token() + assert.Nil(t, tok) + target := &u2m.InvalidRefreshTokenError{} + assert.True(t, errors.As(err, &target)) +} + +func TestChallenge(t *testing.T) { + ctx := context.Background() + + browserOpened := make(chan string) + browser := func(redirect string) error { + u, err := url.ParseRequestURI(redirect) + if err != nil { + return err + } + assert.Equal(t, "/oidc/accounts/xyz/v1/authorize", u.Path) + // for now we're ignoring asserting the fields of the redirect + query := u.Query() + browserOpened <- query.Get("state") + return nil + } + cache := &tokenCacheMock{ + store: func(key string, tok *oauth2.Token) error { + assert.Equal(t, "https://accounts.cloud.databricks.com/oidc/accounts/xyz", key) + assert.Equal(t, "__THAT__", tok.AccessToken) + assert.Equal(t, "__SOMETHING__", tok.RefreshToken) + return nil + }, + } + arg, err := u2m.NewBasicAccountOAuthArgument("https://accounts.cloud.databricks.com", "xyz") + assert.NoError(t, err) + p, err := u2m.NewPersistentAuth( + ctx, + u2m.WithTokenCache(cache), + u2m.WithBrowser(browser), + u2m.WithHttpClient(&http.Client{ + Transport: fixtures.SliceTransport{ + { + Method: "POST", + Resource: "/oidc/accounts/xyz/v1/token", + Response: `access_token=__THAT__&refresh_token=__SOMETHING__`, + ResponseHeaders: map[string][]string{ + "Content-Type": {"application/x-www-form-urlencoded"}, + }, + }, + }, + }), + u2m.WithOAuthEndpointSupplier(MockOAuthEndpointSupplier{}), + u2m.WithOAuthArgument(arg), + ) + require.NoError(t, err) + defer p.Close() + + errc := make(chan error) + go func() { + err := p.Challenge() + errc <- err + close(errc) + }() + + state := <-browserOpened + resp, err := http.Get(fmt.Sprintf("http://localhost:8020?code=__THIS__&state=%s", state)) + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, 200, resp.StatusCode) + + err = <-errc + 
assert.NoError(t, err) +} + +func TestChallenge_ReturnsErrorOnFailure(t *testing.T) { + ctx := context.Background() + browserOpened := make(chan string) + browser := func(redirect string) error { + u, err := url.ParseRequestURI(redirect) + if err != nil { + return err + } + assert.Equal(t, "/oidc/accounts/xyz/v1/authorize", u.Path) + // for now we're ignoring asserting the fields of the redirect + query := u.Query() + browserOpened <- query.Get("state") + return nil + } + arg, err := u2m.NewBasicAccountOAuthArgument("https://accounts.cloud.databricks.com", "xyz") + assert.NoError(t, err) + p, err := u2m.NewPersistentAuth(ctx, u2m.WithBrowser(browser), u2m.WithOAuthArgument(arg)) + require.NoError(t, err) + defer p.Close() + + errc := make(chan error) + go func() { + err := p.Challenge() + errc <- err + close(errc) + }() + + <-browserOpened + resp, err := http.Get( + "http://localhost:8020?error=access_denied&error_description=Policy%20evaluation%20failed%20for%20this%20request") + require.NoError(t, err) + defer resp.Body.Close() + assert.Equal(t, 400, resp.StatusCode) + + err = <-errc + assert.EqualError(t, err, "authorize: access_denied: Policy evaluation failed for this request") +} diff --git a/credentials/u2m/workspace_oauth_argument.go b/credentials/u2m/workspace_oauth_argument.go new file mode 100644 index 000000000..99a8a21e5 --- /dev/null +++ b/credentials/u2m/workspace_oauth_argument.go @@ -0,0 +1,51 @@ +package u2m + +import ( + "fmt" + "strings" +) + +// WorkspaceOAuthArgument is an interface that provides the necessary information +// to authenticate using OAuth to a specific workspace. +type WorkspaceOAuthArgument interface { + OAuthArgument + + // GetWorkspaceHost returns the host of the workspace to authenticate to. + GetWorkspaceHost() string +} + +// BasicWorkspaceOAuthArgument is a basic implementation of the WorkspaceOAuthArgument +// interface that links each host with exactly one OAuth token. 
+type BasicWorkspaceOAuthArgument struct { + // host is the host of the workspace to authenticate to. This must start + // with "https://" and must not have a trailing slash. + host string +} + +// NewBasicWorkspaceOAuthArgument creates a new BasicWorkspaceOAuthArgument. +func NewBasicWorkspaceOAuthArgument(host string) (BasicWorkspaceOAuthArgument, error) { + if !strings.HasPrefix(host, "https://") { + return BasicWorkspaceOAuthArgument{}, fmt.Errorf("host must start with 'https://': %s", host) + } + if strings.HasSuffix(host, "/") { + return BasicWorkspaceOAuthArgument{}, fmt.Errorf("host must not have a trailing slash: %s", host) + } + return BasicWorkspaceOAuthArgument{host: host}, nil +} + +// GetWorkspaceHost returns the host of the workspace to authenticate to. +func (a BasicWorkspaceOAuthArgument) GetWorkspaceHost() string { + return a.host +} + +// GetCacheKey returns a unique key for caching the OAuth token for the workspace. +// The key is in the format "". +func (a BasicWorkspaceOAuthArgument) GetCacheKey() string { + a.host = strings.TrimSuffix(a.host, "/") + if !strings.HasPrefix(a.host, "http") { + a.host = fmt.Sprintf("https://%s", a.host) + } + return a.host +} + +var _ WorkspaceOAuthArgument = BasicWorkspaceOAuthArgument{} diff --git a/go.mod b/go.mod index 0f7fbce9f..9dabdc5ba 100644 --- a/go.mod +++ b/go.mod @@ -6,11 +6,13 @@ require ( github.com/google/go-cmp v0.6.0 github.com/google/go-querystring v1.1.0 github.com/google/uuid v1.6.0 + github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c github.com/stretchr/testify v1.9.0 golang.org/x/exp v0.0.0-20240222234643-814bf88cf225 golang.org/x/mod v0.17.0 golang.org/x/net v0.33.0 golang.org/x/oauth2 v0.20.0 + golang.org/x/text v0.21.0 golang.org/x/time v0.5.0 google.golang.org/api v0.182.0 gopkg.in/ini.v1 v1.67.0 @@ -37,7 +39,6 @@ require ( go.opentelemetry.io/otel/trace v1.24.0 // indirect golang.org/x/crypto v0.31.0 // indirect golang.org/x/sys v0.28.0 // indirect - golang.org/x/text v0.21.0 // 
indirect google.golang.org/genproto/googleapis/rpc v0.0.0-20240521202816-d264139d666e // indirect google.golang.org/grpc v1.64.1 // indirect google.golang.org/protobuf v1.34.1 // indirect diff --git a/go.sum b/go.sum index 42cd14133..44b258fcc 100644 --- a/go.sum +++ b/go.sum @@ -58,6 +58,8 @@ github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+ github.com/googleapis/enterprise-certificate-proxy v0.3.2 h1:Vie5ybvEvT75RniqhfFxPRy3Bf7vr3h0cechB90XaQs= github.com/googleapis/enterprise-certificate-proxy v0.3.2/go.mod h1:VLSiSSBs/ksPL8kq3OBOQ6WRI2QnaFynd1DCjZ62+V0= github.com/googleapis/gax-go/v2 v2.12.4 h1:9gWcmF85Wvq4ryPFvGFaOgPIs1AQX0d0bcbGw4Z96qg= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ= +github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= @@ -113,6 +115,7 @@ golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5h golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA= golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= diff --git a/httpclient/fixtures/fixture.go 
b/httpclient/fixtures/fixture.go index 32b00fcbd..9a85c3c84 100644 --- a/httpclient/fixtures/fixture.go +++ b/httpclient/fixtures/fixture.go @@ -24,6 +24,7 @@ type HTTPFixture struct { Response any Status int + ResponseHeaders map[string][]string ExpectedRequest any ExpectedHeaders map[string]string PassFile string @@ -106,6 +107,7 @@ func (f HTTPFixture) replyWith(req *http.Request, body string) (*http.Response, StatusCode: f.Status, Status: http.StatusText(f.Status), Body: io.NopCloser(strings.NewReader(body)), + Header: f.ResponseHeaders, }, nil } diff --git a/httpclient/oauth_token.go b/httpclient/oauth_token.go index cb6ad5cc9..96f9a539e 100644 --- a/httpclient/oauth_token.go +++ b/httpclient/oauth_token.go @@ -5,15 +5,14 @@ import ( "net/http" "time" - "github.com/databricks/databricks-sdk-go/config/credentials" "golang.org/x/oauth2" ) const JWTGrantType = "urn:ietf:params:oauth:grant-type:jwt-bearer" -// GetOAuthTokenRequest is the request to get an OAuth token. It follows the OAuth 2.0 Rich Authorization Requests specification. +// getOAuthTokenRequest is the request to get an OAuth token. It follows the OAuth 2.0 Rich Authorization Requests specification. // https://datatracker.ietf.org/doc/html/rfc9396 -type GetOAuthTokenRequest struct { +type getOAuthTokenRequest struct { // Defines the method used to get the token. GrantType string `url:"grant_type"` // An array of authorization details that the token should be scoped to. This needs to be passed in string format. @@ -22,6 +21,24 @@ type GetOAuthTokenRequest struct { Assertion string `url:"assertion"` } +// oAuthToken represents an OAuth token as defined by the OAuth 2.0 Authorization Framework. +// https://datatracker.ietf.org/doc/html/rfc6749. +// +// The Go SDK maintains its own implementation of OAuth because Go's oauth2 +// library lacks two features that we depend on: +// 1. The ability to use an arbitrary assertion with the JWT grant type. +// 2. 
The ability to set authorization_details when getting an OAuth token. +type oAuthToken struct { + // The access token issued by the authorization server. This is the token that will be used to authenticate requests. + AccessToken string `json:"access_token"` + // Time in seconds until the token expires. + ExpiresIn int `json:"expires_in"` + // The scope of the token. This is a space-separated list of strings that represent the permissions granted by the token. + Scope string `json:"scope"` + // The type of token that was issued. + TokenType string `json:"token_type"` +} + // Returns a new OAuth token using the provided token. The token must be a JWT token. // The resulting token is scoped to the authorization details provided. // @@ -29,12 +46,12 @@ type GetOAuthTokenRequest struct { // without warning. func (c *ApiClient) GetOAuthToken(ctx context.Context, authDetails string, token *oauth2.Token) (*oauth2.Token, error) { path := "/oidc/v1/token" - data := GetOAuthTokenRequest{ + data := getOAuthTokenRequest{ GrantType: JWTGrantType, AuthorizationDetails: authDetails, Assertion: token.AccessToken, } - var response credentials.OAuthToken + var response oAuthToken opts := []DoOption{ WithUrlEncodedData(data), WithResponseUnmarshal(&response), diff --git a/httpclient/request_test.go b/httpclient/request_test.go index 695875099..59f6e351d 100644 --- a/httpclient/request_test.go +++ b/httpclient/request_test.go @@ -55,7 +55,7 @@ func TestMakeRequestBodyFromReader(t *testing.T) { } func TestUrlEncoding(t *testing.T) { - data := GetOAuthTokenRequest{ + data := getOAuthTokenRequest{ Assertion: "assertion", AuthorizationDetails: "[{\"a\":\"b\"}]", GrantType: "grant", From c920df89427f04c6fc88994bd55f2b6c663e913c Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Wed, 26 Mar 2025 12:43:36 +0100 Subject: [PATCH 34/54] Udate OpenAPI spec (#1195) ## What changes are proposed in this pull request? Udate OpenAPI spec ## How is this tested? 
N/A --- .codegen/_openapi_sha | 2 +- NEXT_CHANGELOG.md | 31 + .../dashboards/mock_genie_interface.go | 59 ++ service/apps/model.go | 8 + service/billing/model.go | 3 +- service/catalog/model.go | 13 +- service/compute/api.go | 74 +-- service/compute/impl.go | 7 +- service/compute/interface.go | 26 +- service/compute/model.go | 559 ++++++++++++------ service/dashboards/api.go | 7 + service/dashboards/impl.go | 10 + service/dashboards/interface.go | 7 + service/dashboards/model.go | 40 +- service/iam/impl.go | 12 - service/iam/model.go | 41 +- service/jobs/model.go | 6 +- service/marketplace/model.go | 15 +- service/ml/model.go | 24 +- service/oauth2/model.go | 7 + service/pipelines/model.go | 59 +- service/pkg.go | 2 +- service/serving/model.go | 86 ++- service/settings/model.go | 8 +- service/sharing/model.go | 76 +-- service/sql/model.go | 51 ++ 26 files changed, 886 insertions(+), 347 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index a7b80d538..2924d5d6d 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -cd641c9dd4febe334b339dd7878d099dcf0eeab5 \ No newline at end of file +31b3fea21dbe5a3a652937691602eb66d6dba30b \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 3d3adac6e..a5567234b 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -17,3 +17,34 @@ error ([PR #1189](https://github.com/databricks/databricks-sdk-go/pull/1189)). ### API Changes +* Added `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Added `EffectiveUserApiScopes`, `Oauth2AppClientId`, `Oauth2AppIntegrationId` and `UserApiScopes` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). 
+* Added `Abfss`, `Dbfs`, `ErrorMessage`, `ExecutionDurationSeconds`, `File`, `Gcs`, `S3`, `Status`, `Volumes` and `Workspace` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). +* [Breaking] Added `ForecastGranularity` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). +* Added `JwksUri` field for [oauth2.OidcFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#OidcFederationPolicy). +* Added `EventLog` field for [pipelines.CreatePipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#CreatePipeline). +* Added `EventLog` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline). +* Added `EventLog` field for [pipelines.PipelineSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineSpec). +* Added `FallbackConfig` field for [serving.AiGatewayConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayConfig). +* Added `CustomProviderConfig` field for [serving.ExternalModel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModel). +* Added `FallbackConfig` field for [serving.PutAiGatewayRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayRequest). +* Added `FallbackConfig` field for [serving.PutAiGatewayResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayResponse). 
+* Added `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.DeltaSharingFunction](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunction). +* Added `QuerySource` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). +* Added `ForeignCatalog` enum value for [catalog.CatalogType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogType). +* Added `Browse` enum value for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). +* Added `AccessTokenFailure`, `AllocationTimeout`, `AllocationTimeoutNodeDaemonNotReady`, `AllocationTimeoutNoHealthyClusters`, `AllocationTimeoutNoMatchedClusters`, `AllocationTimeoutNoReadyClusters`, `AllocationTimeoutNoUnallocatedClusters`, `AllocationTimeoutNoWarmedUpClusters`, `AwsInaccessibleKmsKeyFailure`, `AwsInstanceProfileUpdateFailure`, `AwsInvalidKeyPair`, `AwsInvalidKmsKeyState`, `AwsResourceQuotaExceeded`, `AzurePackedDeploymentPartialFailure`, `BootstrapTimeoutDueToMisconfig`, `BudgetPolicyLimitEnforcementActivated`, `BudgetPolicyResolutionFailure`, `CloudAccountSetupFailure`, `CloudOperationCancelled`, `CloudProviderInstanceNotLaunched`, `CloudProviderLaunchFailureDueToMisconfig`, `CloudProviderResourceStockoutDueToMisconfig`, `ClusterOperationThrottled`, `ClusterOperationTimeout`, `ControlPlaneRequestFailureDueToMisconfig`, `DataAccessConfigChanged`, `DisasterRecoveryReplication`, `DriverEviction`, `DriverLaunchTimeout`, `DriverNodeUnreachable`, `DriverOutOfDisk`, `DriverOutOfMemory`, `DriverPodCreationFailure`, `DriverUnexpectedFailure`, `DynamicSparkConfSizeExceeded`, `EosSparkImage`, `ExecutorPodUnscheduled`, `GcpApiRateQuotaExceeded`, `GcpForbidden`, `GcpIamTimeout`, 
`GcpInaccessibleKmsKeyFailure`, `GcpInsufficientCapacity`, `GcpIpSpaceExhausted`, `GcpKmsKeyPermissionDenied`, `GcpNotFound`, `GcpResourceQuotaExceeded`, `GcpServiceAccountAccessDenied`, `GcpServiceAccountNotFound`, `GcpSubnetNotReady`, `GcpTrustedImageProjectsViolated`, `GkeBasedClusterTermination`, `InitContainerNotFinished`, `InstancePoolMaxCapacityReached`, `InstancePoolNotFound`, `InstanceUnreachableDueToMisconfig`, `InternalCapacityFailure`, `InvalidAwsParameter`, `InvalidInstancePlacementProtocol`, `InvalidWorkerImageFailure`, `InPenaltyBox`, `LazyAllocationTimeout`, `MaintenanceMode`, `NetvisorSetupTimeout`, `NoMatchedK8s`, `NoMatchedK8sTestingTag`, `PodAssignmentFailure`, `PodSchedulingFailure`, `ResourceUsageBlocked`, `SecretCreationFailure`, `ServerlessLongRunningTerminated`, `SparkImageDownloadThrottled`, `SparkImageNotFound`, `SshBootstrapFailure`, `StorageDownloadFailureDueToMisconfig`, `StorageDownloadFailureSlow`, `StorageDownloadFailureThrottled`, `UnexpectedPodRecreation`, `UserInitiatedVmTermination` and `WorkspaceUpdate` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* Added `GeneratedSqlQueryTooLongException` and `MissingSqlQueryException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). +* Added `Balanced` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). +* Added `ListingResource` enum value for [marketplace.FileParentType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#FileParentType). +* Added `App` enum value for [marketplace.MarketplaceFileType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#MarketplaceFileType). 
+* Added `Custom` enum value for [serving.ExternalModelProvider](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModelProvider). +* Added `ArclightMultiTenantAzureExchangeToken` and `ArclightMultiTenantAzureExchangeTokenWithUserDelegationKey` enum values for [settings.TokenType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#TokenType). +* [Breaking] Changed `CreateExperiment` method for [w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service with new required argument order. +* Changed `InstanceTypeId` field for [compute.NodeInstanceType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeInstanceType) to be required. +* Changed `Category` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType) to be required. +* [Breaking] Changed `Functions` field for [sharing.ListProviderShareAssetsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ListProviderShareAssetsResponse) to type [sharing.DeltaSharingFunctionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunctionList). +* [Breaking] Removed `ExecutionDetails` and `Script` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). +* [Breaking] Removed `SupportsElasticDisk` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType). +* [Breaking] Removed `DataGranularityQuantity` and `DataGranularityUnit` fields for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). 
+* [Breaking] Removed `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.Function](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#Function). diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go index f3e485435..c4ac32fd4 100644 --- a/experimental/mocks/service/dashboards/mock_genie_interface.go +++ b/experimental/mocks/service/dashboards/mock_genie_interface.go @@ -277,6 +277,65 @@ func (_c *MockGenieInterface_ExecuteMessageQuery_Call) RunAndReturn(run func(con return _c } +// GenerateDownloadFullQueryResult provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) GenerateDownloadFullQueryResult(ctx context.Context, request dashboards.GenieGenerateDownloadFullQueryResultRequest) (*dashboards.GenieGenerateDownloadFullQueryResultResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GenerateDownloadFullQueryResult") + } + + var r0 *dashboards.GenieGenerateDownloadFullQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGenerateDownloadFullQueryResultRequest) (*dashboards.GenieGenerateDownloadFullQueryResultResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGenerateDownloadFullQueryResultRequest) *dashboards.GenieGenerateDownloadFullQueryResultResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieGenerateDownloadFullQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieGenerateDownloadFullQueryResultRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + 
return r0, r1 +} + +// MockGenieInterface_GenerateDownloadFullQueryResult_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GenerateDownloadFullQueryResult' +type MockGenieInterface_GenerateDownloadFullQueryResult_Call struct { + *mock.Call +} + +// GenerateDownloadFullQueryResult is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GenieGenerateDownloadFullQueryResultRequest +func (_e *MockGenieInterface_Expecter) GenerateDownloadFullQueryResult(ctx interface{}, request interface{}) *MockGenieInterface_GenerateDownloadFullQueryResult_Call { + return &MockGenieInterface_GenerateDownloadFullQueryResult_Call{Call: _e.mock.On("GenerateDownloadFullQueryResult", ctx, request)} +} + +func (_c *MockGenieInterface_GenerateDownloadFullQueryResult_Call) Run(run func(ctx context.Context, request dashboards.GenieGenerateDownloadFullQueryResultRequest)) *MockGenieInterface_GenerateDownloadFullQueryResult_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieGenerateDownloadFullQueryResultRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_GenerateDownloadFullQueryResult_Call) Return(_a0 *dashboards.GenieGenerateDownloadFullQueryResultResponse, _a1 error) *MockGenieInterface_GenerateDownloadFullQueryResult_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GenerateDownloadFullQueryResult_Call) RunAndReturn(run func(context.Context, dashboards.GenieGenerateDownloadFullQueryResultRequest) (*dashboards.GenieGenerateDownloadFullQueryResultResponse, error)) *MockGenieInterface_GenerateDownloadFullQueryResult_Call { + _c.Call.Return(run) + return _c +} + // GetMessage provides a mock function with given fields: ctx, request func (_m *MockGenieInterface) GetMessage(ctx context.Context, request dashboards.GenieGetConversationMessageRequest) (*dashboards.GenieMessage, error) { ret := _m.Called(ctx, request) diff 
--git a/service/apps/model.go b/service/apps/model.go index 0a5ba614f..59d2b1a59 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -30,11 +30,17 @@ type App struct { Description string `json:"description,omitempty"` EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` + // The effective api scopes granted to the user access token. + EffectiveUserApiScopes []string `json:"effective_user_api_scopes,omitempty"` // The unique identifier of the app. Id string `json:"id,omitempty"` // The name of the app. The name must contain only lowercase alphanumeric // characters and hyphens. It must be unique within the workspace. Name string `json:"name"` + + Oauth2AppClientId string `json:"oauth2_app_client_id,omitempty"` + + Oauth2AppIntegrationId string `json:"oauth2_app_integration_id,omitempty"` // The pending deployment of the app. A deployment is considered pending // when it is being prepared for deployment to the app compute. PendingDeployment *AppDeployment `json:"pending_deployment,omitempty"` @@ -53,6 +59,8 @@ type App struct { // The URL of the app once it is deployed. Url string `json:"url,omitempty"` + UserApiScopes []string `json:"user_api_scopes,omitempty"` + ForceSendFields []string `json:"-" url:"-"` } diff --git a/service/billing/model.go b/service/billing/model.go index 86d5fa32b..d9199a2e2 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -260,7 +260,8 @@ type BudgetPolicy struct { // unique. PolicyId string `json:"policy_id,omitempty"` // The name of the policy. - Must be unique among active policies. - Can - // contain only characters from the ISO 8859-1 (latin1) set. + // contain only characters from the ISO 8859-1 (latin1) set. - Can't start + // with reserved keywords such as `databricks:default-policy`. 
PolicyName string `json:"policy_name,omitempty"` ForceSendFields []string `json:"-" url:"-"` diff --git a/service/catalog/model.go b/service/catalog/model.go index 6b90fdb82..2658e51f2 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -436,6 +436,8 @@ type CatalogType string const CatalogTypeDeltasharingCatalog CatalogType = `DELTASHARING_CATALOG` +const CatalogTypeForeignCatalog CatalogType = `FOREIGN_CATALOG` + const CatalogTypeManagedCatalog CatalogType = `MANAGED_CATALOG` const CatalogTypeSystemCatalog CatalogType = `SYSTEM_CATALOG` @@ -448,11 +450,11 @@ func (f *CatalogType) String() string { // Set raw string value and validate it against allowed values func (f *CatalogType) Set(v string) error { switch v { - case `DELTASHARING_CATALOG`, `MANAGED_CATALOG`, `SYSTEM_CATALOG`: + case `DELTASHARING_CATALOG`, `FOREIGN_CATALOG`, `MANAGED_CATALOG`, `SYSTEM_CATALOG`: *f = CatalogType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "DELTASHARING_CATALOG", "MANAGED_CATALOG", "SYSTEM_CATALOG"`, v) + return fmt.Errorf(`value "%s" is not one of "DELTASHARING_CATALOG", "FOREIGN_CATALOG", "MANAGED_CATALOG", "SYSTEM_CATALOG"`, v) } } @@ -4423,6 +4425,8 @@ const PrivilegeAllPrivileges Privilege = `ALL_PRIVILEGES` const PrivilegeApplyTag Privilege = `APPLY_TAG` +const PrivilegeBrowse Privilege = `BROWSE` + const PrivilegeCreate Privilege = `CREATE` const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG` @@ -4515,11 +4519,11 @@ func (f *Privilege) String() string { // Set raw string value and validate it against allowed values func (f *Privilege) Set(v string) error { switch v { - case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, 
`CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `BROWSE`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: *f = Privilege(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", 
"SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "BROWSE", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) } } @@ -5833,6 +5837,7 @@ type UpdateWorkspaceBindingsParameters struct { SecurableType UpdateBindingsSecurableType `json:"-" url:"-"` } +// Next ID: 17 type ValidateCredentialRequest struct { // The AWS IAM role configuration AwsIamRole *AwsIamRole `json:"aws_iam_role,omitempty"` diff --git a/service/compute/api.go b/service/compute/api.go index 4e834e706..d4aa79438 100755 --- a/service/compute/api.go +++ b/service/compute/api.go @@ -267,9 +267,12 @@ type ClustersInterface interface { // Create new cluster. // // Creates a new Spark cluster. This method will acquire new instances from the - // cloud provider if necessary. Note: Databricks may not be able to acquire some - // of the requested nodes, due to cloud provider limitations (account limits, - // spot price, etc.) or transient network issues. + // cloud provider if necessary. 
This method is asynchronous; the returned + // ``cluster_id`` can be used to poll the cluster status. When this method + // returns, the cluster will be in a ``PENDING`` state. The cluster will be + // usable once it enters a ``RUNNING`` state. Note: Databricks may not be able + // to acquire some of the requested nodes, due to cloud provider limitations + // (account limits, spot price, etc.) or transient network issues. // // If Databricks acquires at least 85% of the requested on-demand nodes, cluster // creation will succeed. Otherwise the cluster will terminate with an @@ -344,7 +347,7 @@ type ClustersInterface interface { // // Retrieves a list of events about the activity of a cluster. This API is // paginated. If there are more events to read, the response includes all the - // nparameters necessary to request the next page of events. + // parameters necessary to request the next page of events. // // This method is generated by Databricks SDK Code Generator. Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] @@ -353,7 +356,7 @@ type ClustersInterface interface { // // Retrieves a list of events about the activity of a cluster. This API is // paginated. If there are more events to read, the response includes all the - // nparameters necessary to request the next page of events. + // parameters necessary to request the next page of events. // // This method is generated by Databricks SDK Code Generator. EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) @@ -518,13 +521,12 @@ type ClustersInterface interface { // Start terminated cluster. // // Starts a terminated Spark cluster with the supplied ID. This works similar to - // `createCluster` except: - // - // * The previous cluster id and attributes are preserved. * The cluster starts - // with the last specified cluster size. * If the previous cluster was an - // autoscaling cluster, the current cluster starts with the minimum number of - // nodes. 
* If the cluster is not currently in a `TERMINATED` state, nothing - // will happen. * Clusters launched to run a job cannot be started. + // `createCluster` except: - The previous cluster id and attributes are + // preserved. - The cluster starts with the last specified cluster size. - If + // the previous cluster was an autoscaling cluster, the current cluster starts + // with the minimum number of nodes. - If the cluster is not currently in a + // ``TERMINATED`` state, nothing will happen. - Clusters launched to run a job + // cannot be started. Start(ctx context.Context, startCluster StartCluster) (*WaitGetClusterRunning[struct{}], error) // Calls [ClustersAPIInterface.Start] and waits to reach RUNNING state @@ -538,13 +540,12 @@ type ClustersInterface interface { // Start terminated cluster. // // Starts a terminated Spark cluster with the supplied ID. This works similar to - // `createCluster` except: - // - // * The previous cluster id and attributes are preserved. * The cluster starts - // with the last specified cluster size. * If the previous cluster was an - // autoscaling cluster, the current cluster starts with the minimum number of - // nodes. * If the cluster is not currently in a `TERMINATED` state, nothing - // will happen. * Clusters launched to run a job cannot be started. + // `createCluster` except: - The previous cluster id and attributes are + // preserved. - The cluster starts with the last specified cluster size. - If + // the previous cluster was an autoscaling cluster, the current cluster starts + // with the minimum number of nodes. - If the cluster is not currently in a + // ``TERMINATED`` state, nothing will happen. - Clusters launched to run a job + // cannot be started. 
StartByClusterId(ctx context.Context, clusterId string) error StartByClusterIdAndWait(ctx context.Context, clusterId string, options ...retries.Option[ClusterDetails]) (*ClusterDetails, error) @@ -741,9 +742,12 @@ func (w *WaitGetClusterTerminated[R]) GetWithTimeout(timeout time.Duration) (*Cl // Create new cluster. // // Creates a new Spark cluster. This method will acquire new instances from the -// cloud provider if necessary. Note: Databricks may not be able to acquire some -// of the requested nodes, due to cloud provider limitations (account limits, -// spot price, etc.) or transient network issues. +// cloud provider if necessary. This method is asynchronous; the returned +// “cluster_id“ can be used to poll the cluster status. When this method +// returns, the cluster will be in a “PENDING“ state. The cluster will be +// usable once it enters a “RUNNING“ state. Note: Databricks may not be able +// to acquire some of the requested nodes, due to cloud provider limitations +// (account limits, spot price, etc.) or transient network issues. // // If Databricks acquires at least 85% of the requested on-demand nodes, cluster // creation will succeed. Otherwise the cluster will terminate with an @@ -1125,13 +1129,12 @@ func (a *ClustersAPI) RestartAndWait(ctx context.Context, restartCluster Restart // Start terminated cluster. // // Starts a terminated Spark cluster with the supplied ID. This works similar to -// `createCluster` except: -// -// * The previous cluster id and attributes are preserved. * The cluster starts -// with the last specified cluster size. * If the previous cluster was an -// autoscaling cluster, the current cluster starts with the minimum number of -// nodes. * If the cluster is not currently in a `TERMINATED` state, nothing -// will happen. * Clusters launched to run a job cannot be started. +// `createCluster` except: - The previous cluster id and attributes are +// preserved. - The cluster starts with the last specified cluster size. 
- If +// the previous cluster was an autoscaling cluster, the current cluster starts +// with the minimum number of nodes. - If the cluster is not currently in a +// “TERMINATED“ state, nothing will happen. - Clusters launched to run a job +// cannot be started. func (a *ClustersAPI) Start(ctx context.Context, startCluster StartCluster) (*WaitGetClusterRunning[struct{}], error) { err := a.clustersImpl.Start(ctx, startCluster) if err != nil { @@ -1178,13 +1181,12 @@ func (a *ClustersAPI) StartAndWait(ctx context.Context, startCluster StartCluste // Start terminated cluster. // // Starts a terminated Spark cluster with the supplied ID. This works similar to -// `createCluster` except: -// -// * The previous cluster id and attributes are preserved. * The cluster starts -// with the last specified cluster size. * If the previous cluster was an -// autoscaling cluster, the current cluster starts with the minimum number of -// nodes. * If the cluster is not currently in a `TERMINATED` state, nothing -// will happen. * Clusters launched to run a job cannot be started. +// `createCluster` except: - The previous cluster id and attributes are +// preserved. - The cluster starts with the last specified cluster size. - If +// the previous cluster was an autoscaling cluster, the current cluster starts +// with the minimum number of nodes. - If the cluster is not currently in a +// “TERMINATED“ state, nothing will happen. - Clusters launched to run a job +// cannot be started. func (a *ClustersAPI) StartByClusterId(ctx context.Context, clusterId string) error { return a.clustersImpl.Start(ctx, StartCluster{ ClusterId: clusterId, diff --git a/service/compute/impl.go b/service/compute/impl.go index 0b6a7fbf0..f81423ec6 100755 --- a/service/compute/impl.go +++ b/service/compute/impl.go @@ -194,7 +194,7 @@ func (a *clustersImpl) Edit(ctx context.Context, request EditCluster) error { // // Retrieves a list of events about the activity of a cluster. This API is // paginated. 
If there are more events to read, the response includes all the -// nparameters necessary to request the next page of events. +// parameters necessary to request the next page of events. func (a *clustersImpl) Events(ctx context.Context, request GetEvents) listing.Iterator[ClusterEvent] { getNextPage := func(ctx context.Context, req GetEvents) (*GetEventsResponse, error) { @@ -224,7 +224,7 @@ func (a *clustersImpl) Events(ctx context.Context, request GetEvents) listing.It // // Retrieves a list of events about the activity of a cluster. This API is // paginated. If there are more events to read, the response includes all the -// nparameters necessary to request the next page of events. +// parameters necessary to request the next page of events. func (a *clustersImpl) EventsAll(ctx context.Context, request GetEvents) ([]ClusterEvent, error) { iterator := a.Events(ctx, request) return listing.ToSliceN[ClusterEvent, int64](ctx, iterator, request.Limit) @@ -308,8 +308,7 @@ func (a *clustersImpl) List(ctx context.Context, request ListClustersRequest) li // are not included. func (a *clustersImpl) ListAll(ctx context.Context, request ListClustersRequest) ([]ClusterDetails, error) { iterator := a.List(ctx, request) - return listing.ToSliceN[ClusterDetails, int](ctx, iterator, request.PageSize) - + return listing.ToSlice[ClusterDetails](ctx, iterator) } func (a *clustersImpl) internalList(ctx context.Context, request ListClustersRequest) (*ListClustersResponse, error) { diff --git a/service/compute/interface.go b/service/compute/interface.go index 05ecfd19f..87da5ac34 100755 --- a/service/compute/interface.go +++ b/service/compute/interface.go @@ -124,9 +124,13 @@ type ClustersService interface { // Create new cluster. // // Creates a new Spark cluster. This method will acquire new instances from - // the cloud provider if necessary. 
Note: Databricks may not be able to - // acquire some of the requested nodes, due to cloud provider limitations - // (account limits, spot price, etc.) or transient network issues. + // the cloud provider if necessary. This method is asynchronous; the + // returned ``cluster_id`` can be used to poll the cluster status. When this + // method returns, the cluster will be in a ``PENDING`` state. The cluster + // will be usable once it enters a ``RUNNING`` state. Note: Databricks may + // not be able to acquire some of the requested nodes, due to cloud provider + // limitations (account limits, spot price, etc.) or transient network + // issues. // // If Databricks acquires at least 85% of the requested on-demand nodes, // cluster creation will succeed. Otherwise the cluster will terminate with @@ -168,7 +172,7 @@ type ClustersService interface { // // Retrieves a list of events about the activity of a cluster. This API is // paginated. If there are more events to read, the response includes all - // the nparameters necessary to request the next page of events. + // the parameters necessary to request the next page of events. // // Use EventsAll() to get all ClusterEvent instances, which will iterate over every result page. Events(ctx context.Context, request GetEvents) (*GetEventsResponse, error) @@ -257,14 +261,12 @@ type ClustersService interface { // Start terminated cluster. // // Starts a terminated Spark cluster with the supplied ID. This works - // similar to `createCluster` except: - // - // * The previous cluster id and attributes are preserved. * The cluster - // starts with the last specified cluster size. * If the previous cluster - // was an autoscaling cluster, the current cluster starts with the minimum - // number of nodes. * If the cluster is not currently in a `TERMINATED` - // state, nothing will happen. * Clusters launched to run a job cannot be - // started. 
+ // similar to `createCluster` except: - The previous cluster id and + // attributes are preserved. - The cluster starts with the last specified + // cluster size. - If the previous cluster was an autoscaling cluster, the + // current cluster starts with the minimum number of nodes. - If the cluster + // is not currently in a ``TERMINATED`` state, nothing will happen. - + // Clusters launched to run a job cannot be started. Start(ctx context.Context, request StartCluster) error // Unpin cluster. diff --git a/service/compute/model.go b/service/compute/model.go index 561eb3fc2..37bb90553 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -50,6 +50,7 @@ func (s AddInstanceProfile) MarshalJSON() ([]byte, error) { type AddResponse struct { } +// A storage location in Adls Gen2 type Adlsgen2Info struct { // abfss destination, e.g. // `abfss://@.dfs.core.windows.net/`. @@ -77,6 +78,8 @@ func (s AutoScale) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Attributes set during cluster creation which are related to Amazon Web +// Services. type AwsAttributes struct { // Availability type used for all subsequent nodes past the // `first_on_demand` ones. @@ -131,9 +134,6 @@ type AwsAttributes struct { // added to the Databricks environment by an account administrator. // // This feature may only be available to certain customer plans. - // - // If this field is ommitted, we will pull in the default from the conf if - // it exists. InstanceProfileArn string `json:"instance_profile_arn,omitempty"` // The bid price for AWS spot instances, as a percentage of the // corresponding instance type's on-demand price. For example, if this field @@ -145,10 +145,6 @@ type AwsAttributes struct { // instances whose bid price percentage matches this field will be // considered. Note that, for safety, we enforce this field to be no more // than 10000. 
- // - // The default value and documentation here should be kept consistent with - // CommonConf.defaultSpotBidPricePercent and - // CommonConf.maxSpotBidPricePercent. SpotBidPricePercent int `json:"spot_bid_price_percent,omitempty"` // Identifier for the availability zone/datacenter in which the cluster // resides. This string will be of a form like "us-west-2a". The provided @@ -158,8 +154,10 @@ type AwsAttributes struct { // optional field at cluster creation, and if not specified, a default zone // will be used. If the zone specified is "auto", will try to place cluster // in a zone with high availability, and will retry placement in a different - // AZ if there is not enough capacity. The list of available zones as well - // as the default value can be found by using the `List Zones` method. + // AZ if there is not enough capacity. + // + // The list of available zones as well as the default value can be found by + // using the `List Zones` method. ZoneId string `json:"zone_id,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -207,11 +205,11 @@ func (f *AwsAvailability) Type() string { return "AwsAvailability" } +// Attributes set during cluster creation which are related to Microsoft Azure. type AzureAttributes struct { // Availability type used for all subsequent nodes past the - // `first_on_demand` ones. Note: If `first_on_demand` is zero (which only - // happens on pool clusters), this availability type will be used for the - // entire cluster. + // `first_on_demand` ones. Note: If `first_on_demand` is zero, this + // availability type will be used for the entire cluster. Availability AzureAvailability `json:"availability,omitempty"` // The first `first_on_demand` nodes of the cluster will be placed on // on-demand instances. This value should be greater than 0, to make sure @@ -244,8 +242,8 @@ func (s AzureAttributes) MarshalJSON() ([]byte, error) { } // Availability type used for all subsequent nodes past the `first_on_demand` -// ones. 
Note: If `first_on_demand` is zero (which only happens on pool -// clusters), this availability type will be used for the entire cluster. +// ones. Note: If `first_on_demand` is zero, this availability type will be used +// for the entire cluster. type AzureAvailability string const AzureAvailabilityOnDemandAzure AzureAvailability = `ON_DEMAND_AZURE` @@ -297,7 +295,6 @@ type CancelResponse struct { } type ChangeClusterOwner struct { - // ClusterId string `json:"cluster_id"` // New owner of the cluster_id after this RPC. OwnerUsername string `json:"owner_username"` @@ -329,6 +326,7 @@ type CloneCluster struct { } type CloudProviderNodeInfo struct { + // Status as reported by the cloud provider Status []CloudProviderNodeStatus `json:"status,omitempty"` } @@ -403,6 +401,8 @@ func (s ClusterAccessControlResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Common set of attributes set during cluster creation. These attributes cannot +// be changed over the lifetime of a cluster. type ClusterAttributes struct { // Automatically terminates the cluster after it is inactive for this time // in minutes. If not set, this cluster will not be automatically @@ -466,7 +466,7 @@ type ClusterAttributes struct { // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` - + // Custom docker image BYOC DockerImage *DockerImage `json:"docker_image,omitempty"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id @@ -475,6 +475,11 @@ type ClusterAttributes struct { // The node type of the Spark driver. Note that this field is optional; if // unset, the driver node type will be set as the same value as // `node_type_id` defined above. + // + // This field, along with node_type_id, should not be set if + // virtual_cluster_size is set. 
If both driver_node_type_id, node_type_id, + // and virtual_cluster_size are specified, driver_node_type_id and + // node_type_id take precedence. DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` // Autoscaling Local Storage: when enabled, this cluster will dynamically // acquire additional disk space when its Spark workers are running low on @@ -569,7 +574,7 @@ type ClusterAttributes struct { // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or // not. UseMlRuntime bool `json:"use_ml_runtime,omitempty"` - + // Cluster Attributes showing for clusters workload types. WorkloadType *WorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -606,6 +611,7 @@ func (s ClusterCompliance) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Describes all of the metadata about a single Spark cluster in Databricks. type ClusterDetails struct { // Parameters needed in order to automatically scale clusters up and down // based on load. Note: autoscaling works best with DB runtime versions 3.0 @@ -646,8 +652,7 @@ type ClusterDetails struct { // not specified at creation, the cluster name will be an empty string. ClusterName string `json:"cluster_name,omitempty"` // Determines whether the cluster was created by a user through the UI, - // created by the Databricks Jobs Scheduler, or through an API request. This - // is the same as cluster_creator, but read only. + // created by the Databricks Jobs Scheduler, or through an API request. ClusterSource ClusterSource `json:"cluster_source,omitempty"` // Creator user name. The field won't be included in the response if the // user has already been deleted. @@ -704,7 +709,7 @@ type ClusterDetails struct { // // - Name: DefaultTags map[string]string `json:"default_tags,omitempty"` - + // Custom docker image BYOC DockerImage *DockerImage `json:"docker_image,omitempty"` // Node on which the Spark driver resides. 
The driver node contains the // Spark master and the Databricks application that manages the per-notebook @@ -717,6 +722,11 @@ type ClusterDetails struct { // The node type of the Spark driver. Note that this field is optional; if // unset, the driver node type will be set as the same value as // `node_type_id` defined above. + // + // This field, along with node_type_id, should not be set if + // virtual_cluster_size is set. If both driver_node_type_id, node_type_id, + // and virtual_cluster_size are specified, driver_node_type_id and + // node_type_id take precedence. DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` // Autoscaling Local Storage: when enabled, this cluster will dynamically // acquire additional disk space when its Spark workers are running low on @@ -826,10 +836,9 @@ type ClusterDetails struct { // available Spark versions can be retrieved by using the // :method:clusters/sparkVersions API call. SparkVersion string `json:"spark_version,omitempty"` - // `spec` contains a snapshot of the field values that were used to create - // or edit this cluster. The contents of `spec` can be used in the body of a - // create cluster request. This field might not be populated for older - // clusters. Note: not included in the response of the ListClusters API. + // The spec contains a snapshot of the latest user specified settings that + // were used to create/edit the cluster. Note: not included in the response + // of the ListClusters API. Spec *ClusterSpec `json:"spec,omitempty"` // SSH public key contents that will be added to each Spark node in this // cluster. The corresponding private keys can be used to login with the @@ -855,7 +864,7 @@ type ClusterDetails struct { // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or // not. UseMlRuntime bool `json:"use_ml_runtime,omitempty"` - + // Cluster Attributes showing for clusters workload types. 
WorkloadType *WorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -870,11 +879,10 @@ func (s ClusterDetails) MarshalJSON() ([]byte, error) { } type ClusterEvent struct { - // ClusterId string `json:"cluster_id"` - // + DataPlaneEventDetails *DataPlaneEventDetails `json:"data_plane_event_details,omitempty"` - // + Details *EventDetails `json:"details,omitempty"` // The timestamp when the event occurred, stored as the number of // milliseconds since the Unix epoch. If not provided, this will be assigned @@ -911,6 +919,7 @@ func (s ClusterLibraryStatuses) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Cluster log delivery config type ClusterLogConf struct { // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : // "dbfs:/home/cluster_log" } }` @@ -921,7 +930,7 @@ type ClusterLogConf struct { // the cluster iam role in `instance_profile_arn` has permission to write // data to the s3 destination. S3 *S3StorageInfo `json:"s3,omitempty"` - // destination needs to be provided. e.g. `{ "volumes" : { "destination" : + // destination needs to be provided, e.g. `{ "volumes": { "destination": // "/Volumes/catalog/schema/volume/cluster_log" } }` Volumes *VolumesStorageInfo `json:"volumes,omitempty"` } @@ -1237,6 +1246,8 @@ func (f *ClusterSource) Type() string { return "ClusterSource" } +// Contains a snapshot of the latest user specified settings that were used to +// create/edit the cluster. type ClusterSpec struct { // When set to true, fixed and default values from the policy will be used // for fields that are omitted. When set to false, only fixed values from @@ -1308,7 +1319,7 @@ type ClusterSpec struct { // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This // mode provides a way that doesn’t have UC nor passthrough enabled. 
DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` - + // Custom docker image BYOC DockerImage *DockerImage `json:"docker_image,omitempty"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id @@ -1317,6 +1328,11 @@ type ClusterSpec struct { // The node type of the Spark driver. Note that this field is optional; if // unset, the driver node type will be set as the same value as // `node_type_id` defined above. + // + // This field, along with node_type_id, should not be set if + // virtual_cluster_size is set. If both driver_node_type_id, node_type_id, + // and virtual_cluster_size are specified, driver_node_type_id and + // node_type_id take precedence. DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` // Autoscaling Local Storage: when enabled, this cluster will dynamically // acquire additional disk space when its Spark workers are running low on @@ -1422,7 +1438,7 @@ type ClusterSpec struct { // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or // not. UseMlRuntime bool `json:"use_ml_runtime,omitempty"` - + // Cluster Attributes showing for clusters workload types. WorkloadType *WorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1651,7 +1667,7 @@ type CreateCluster struct { // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` - + // Custom docker image BYOC DockerImage *DockerImage `json:"docker_image,omitempty"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id @@ -1660,6 +1676,11 @@ type CreateCluster struct { // The node type of the Spark driver. 
Note that this field is optional; if // unset, the driver node type will be set as the same value as // `node_type_id` defined above. + // + // This field, along with node_type_id, should not be set if + // virtual_cluster_size is set. If both driver_node_type_id, node_type_id, + // and virtual_cluster_size are specified, driver_node_type_id and + // node_type_id take precedence. DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` // Autoscaling Local Storage: when enabled, this cluster will dynamically // acquire additional disk space when its Spark workers are running low on @@ -1765,7 +1786,7 @@ type CreateCluster struct { // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or // not. UseMlRuntime bool `json:"use_ml_runtime,omitempty"` - + // Cluster Attributes showing for clusters workload types. WorkloadType *WorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -2010,13 +2031,12 @@ func (s CustomPolicyTag) MarshalJSON() ([]byte, error) { } type DataPlaneEventDetails struct { - // EventType DataPlaneEventDetailsEventType `json:"event_type,omitempty"` - // + ExecutorFailures int `json:"executor_failures,omitempty"` - // + HostId string `json:"host_id,omitempty"` - // + Timestamp int64 `json:"timestamp,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -2030,7 +2050,6 @@ func (s DataPlaneEventDetails) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// type DataPlaneEventDetailsEventType string const DataPlaneEventDetailsEventTypeNodeBlacklisted DataPlaneEventDetailsEventType = `NODE_BLACKLISTED` @@ -2149,6 +2168,7 @@ func (f *DataSecurityMode) Type() string { return "DataSecurityMode" } +// A storage location in DBFS type DbfsStorageInfo struct { // dbfs destination, e.g. 
`dbfs:/my/path` Destination string `json:"destination"` @@ -2347,7 +2367,8 @@ func (s DockerImage) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The type of EBS volumes that will be launched with this cluster. +// All EBS volume types that Databricks supports. See +// https://aws.amazon.com/ebs/details/ for details. type EbsVolumeType string const EbsVolumeTypeGeneralPurposeSsd EbsVolumeType = `GENERAL_PURPOSE_SSD` @@ -2448,7 +2469,7 @@ type EditCluster struct { // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` - + // Custom docker image BYOC DockerImage *DockerImage `json:"docker_image,omitempty"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id @@ -2457,6 +2478,11 @@ type EditCluster struct { // The node type of the Spark driver. Note that this field is optional; if // unset, the driver node type will be set as the same value as // `node_type_id` defined above. + // + // This field, along with node_type_id, should not be set if + // virtual_cluster_size is set. If both driver_node_type_id, node_type_id, + // and virtual_cluster_size are specified, driver_node_type_id and + // node_type_id take precedence. DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` // Autoscaling Local Storage: when enabled, this cluster will dynamically // acquire additional disk space when its Spark workers are running low on @@ -2562,7 +2588,7 @@ type EditCluster struct { // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or // not. UseMlRuntime bool `json:"use_ml_runtime,omitempty"` - + // Cluster Attributes showing for clusters workload types. 
WorkloadType *WorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -2747,7 +2773,7 @@ type EventDetails struct { CurrentNumVcpus int `json:"current_num_vcpus,omitempty"` // The current number of nodes in the cluster. CurrentNumWorkers int `json:"current_num_workers,omitempty"` - // + DidNotExpandReason string `json:"did_not_expand_reason,omitempty"` // Current disk size in bytes DiskSize int64 `json:"disk_size,omitempty"` @@ -2756,7 +2782,7 @@ type EventDetails struct { // Whether or not a blocklisted node should be terminated. For // ClusterEventType NODE_BLACKLISTED. EnableTerminationForNodeBlocklisted bool `json:"enable_termination_for_node_blocklisted,omitempty"` - // + FreeSpace int64 `json:"free_space,omitempty"` // List of global and cluster init scripts associated with this cluster // event. @@ -2909,12 +2935,13 @@ func (f *EventType) Type() string { return "EventType" } +// Attributes set during cluster creation which are related to GCP. type GcpAttributes struct { - // This field determines whether the instance pool will contain preemptible - // VMs, on-demand VMs, or preemptible VMs with a fallback to on-demand VMs - // if the former is unavailable. + // This field determines whether the spark executors will be scheduled to + // run on preemptible VMs, on-demand VMs, or preemptible VMs with a fallback + // to on-demand VMs if the former is unavailable. Availability GcpAvailability `json:"availability,omitempty"` - // boot disk size in GB + // Boot disk size in GB BootDiskSize int `json:"boot_disk_size,omitempty"` // If provided, the cluster will impersonate the google service account when // accessing gcloud services (like GCS). The google service account must @@ -2931,11 +2958,11 @@ type GcpAttributes struct { // This field determines whether the spark executors will be scheduled to // run on preemptible VMs (when set to true) versus standard compute engine // VMs (when set to false; default). 
Note: Soon to be deprecated, use the - // availability field instead. + // 'availability' field instead. UsePreemptibleExecutors bool `json:"use_preemptible_executors,omitempty"` // Identifier for the availability zone in which the cluster resides. This // can be one of the following: - "HA" => High availability, spread nodes - // across availability zones for a Databricks deployment region [default] - + // across availability zones for a Databricks deployment region [default]. - // "AUTO" => Databricks picks an availability zone to schedule the cluster // on. - A GCP availability zone => Pick One of the available zones for // (machine type + region) from @@ -2985,6 +3012,7 @@ func (f *GcpAvailability) Type() string { return "GcpAvailability" } +// A storage location in Google Cloud Platform's GCS type GcsStorageInfo struct { // GCS destination/URI, e.g. `gs://my-bucket/some-prefix` Destination string `json:"destination"` @@ -3097,7 +3125,6 @@ func (s GetEvents) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The order to list events in; either "ASC" or "DESC". Defaults to "DESC". type GetEventsOrder string const GetEventsOrderAsc GetEventsOrder = `ASC` @@ -3126,7 +3153,6 @@ func (f *GetEventsOrder) Type() string { } type GetEventsResponse struct { - // Events []ClusterEvent `json:"events,omitempty"` // The parameters required to retrieve the next page of events. Omitted if // there are no more events to read. @@ -3417,11 +3443,16 @@ func (s GlobalInitScriptUpdateRequest) MarshalJSON() ([]byte, error) { } type InitScriptEventDetails struct { - // The cluster scoped init scripts associated with this cluster event + // The cluster scoped init scripts associated with this cluster event. Cluster []InitScriptInfoAndExecutionDetails `json:"cluster,omitempty"` - // The global init scripts associated with this cluster event + // The global init scripts associated with this cluster event. 
Global []InitScriptInfoAndExecutionDetails `json:"global,omitempty"` - // The private ip address of the node where the init scripts were run. + // The private ip of the node we are reporting init script execution details + // for (we will select the execution details from only one node rather than + // reporting the execution details from every node to keep these event + // details small) + // + // This should only be defined for the INIT_SCRIPTS_FINISHED event ReportedForNode string `json:"reported_for_node,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -3435,94 +3466,115 @@ func (s InitScriptEventDetails) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type InitScriptExecutionDetails struct { - // Addition details regarding errors. - ErrorMessage string `json:"error_message,omitempty"` - // The duration of the script execution in seconds. - ExecutionDurationSeconds int `json:"execution_duration_seconds,omitempty"` - // The current status of the script - Status InitScriptExecutionDetailsStatus `json:"status,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *InitScriptExecutionDetails) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s InitScriptExecutionDetails) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} +// Result of attempted script execution +type InitScriptExecutionDetailsInitScriptExecutionStatus string -// The current status of the script -type InitScriptExecutionDetailsStatus string +const InitScriptExecutionDetailsInitScriptExecutionStatusFailedExecution InitScriptExecutionDetailsInitScriptExecutionStatus = `FAILED_EXECUTION` -const InitScriptExecutionDetailsStatusFailedExecution InitScriptExecutionDetailsStatus = `FAILED_EXECUTION` +const InitScriptExecutionDetailsInitScriptExecutionStatusFailedFetch InitScriptExecutionDetailsInitScriptExecutionStatus = `FAILED_FETCH` -const InitScriptExecutionDetailsStatusFailedFetch InitScriptExecutionDetailsStatus = 
`FAILED_FETCH` +const InitScriptExecutionDetailsInitScriptExecutionStatusFuseMountFailed InitScriptExecutionDetailsInitScriptExecutionStatus = `FUSE_MOUNT_FAILED` -const InitScriptExecutionDetailsStatusNotExecuted InitScriptExecutionDetailsStatus = `NOT_EXECUTED` +const InitScriptExecutionDetailsInitScriptExecutionStatusNotExecuted InitScriptExecutionDetailsInitScriptExecutionStatus = `NOT_EXECUTED` -const InitScriptExecutionDetailsStatusSkipped InitScriptExecutionDetailsStatus = `SKIPPED` +const InitScriptExecutionDetailsInitScriptExecutionStatusSkipped InitScriptExecutionDetailsInitScriptExecutionStatus = `SKIPPED` -const InitScriptExecutionDetailsStatusSucceeded InitScriptExecutionDetailsStatus = `SUCCEEDED` +const InitScriptExecutionDetailsInitScriptExecutionStatusSucceeded InitScriptExecutionDetailsInitScriptExecutionStatus = `SUCCEEDED` -const InitScriptExecutionDetailsStatusUnknown InitScriptExecutionDetailsStatus = `UNKNOWN` +const InitScriptExecutionDetailsInitScriptExecutionStatusUnknown InitScriptExecutionDetailsInitScriptExecutionStatus = `UNKNOWN` // String representation for [fmt.Print] -func (f *InitScriptExecutionDetailsStatus) String() string { +func (f *InitScriptExecutionDetailsInitScriptExecutionStatus) String() string { return string(*f) } // Set raw string value and validate it against allowed values -func (f *InitScriptExecutionDetailsStatus) Set(v string) error { +func (f *InitScriptExecutionDetailsInitScriptExecutionStatus) Set(v string) error { switch v { - case `FAILED_EXECUTION`, `FAILED_FETCH`, `NOT_EXECUTED`, `SKIPPED`, `SUCCEEDED`, `UNKNOWN`: - *f = InitScriptExecutionDetailsStatus(v) + case `FAILED_EXECUTION`, `FAILED_FETCH`, `FUSE_MOUNT_FAILED`, `NOT_EXECUTED`, `SKIPPED`, `SUCCEEDED`, `UNKNOWN`: + *f = InitScriptExecutionDetailsInitScriptExecutionStatus(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "FAILED_EXECUTION", "FAILED_FETCH", "NOT_EXECUTED", "SKIPPED", "SUCCEEDED", "UNKNOWN"`, v) + return 
fmt.Errorf(`value "%s" is not one of "FAILED_EXECUTION", "FAILED_FETCH", "FUSE_MOUNT_FAILED", "NOT_EXECUTED", "SKIPPED", "SUCCEEDED", "UNKNOWN"`, v) } } -// Type always returns InitScriptExecutionDetailsStatus to satisfy [pflag.Value] interface -func (f *InitScriptExecutionDetailsStatus) Type() string { - return "InitScriptExecutionDetailsStatus" +// Type always returns InitScriptExecutionDetailsInitScriptExecutionStatus to satisfy [pflag.Value] interface +func (f *InitScriptExecutionDetailsInitScriptExecutionStatus) Type() string { + return "InitScriptExecutionDetailsInitScriptExecutionStatus" } +// Config for an individual init script Next ID: 11 type InitScriptInfo struct { - // destination needs to be provided. e.g. `{ "abfss" : { "destination" : - // "abfss://@.dfs.core.windows.net/" - // } } + // destination needs to be provided, e.g. + // `abfss://@.dfs.core.windows.net/` Abfss *Adlsgen2Info `json:"abfss,omitempty"` - // destination needs to be provided. e.g. `{ "dbfs" : { "destination" : + // destination needs to be provided. e.g. `{ "dbfs": { "destination" : // "dbfs:/home/cluster_log" } }` Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` - // destination needs to be provided. e.g. `{ "file" : { "destination" : + // destination needs to be provided, e.g. `{ "file": { "destination": // "file:/my/local/file.sh" } }` File *LocalFileInfo `json:"file,omitempty"` - // destination needs to be provided. e.g. `{ "gcs": { "destination": + // destination needs to be provided, e.g. `{ "gcs": { "destination": // "gs://my-bucket/file.sh" } }` Gcs *GcsStorageInfo `json:"gcs,omitempty"` // destination and either the region or endpoint need to be provided. e.g. - // `{ "s3": { "destination" : "s3://cluster_log_bucket/prefix", "region" : - // "us-west-2" } }` Cluster iam role is used to access s3, please make sure - // the cluster iam role in `instance_profile_arn` has permission to write - // data to the s3 destination. 
+ // `{ \"s3\": { \"destination\": \"s3://cluster_log_bucket/prefix\", + // \"region\": \"us-west-2\" } }` Cluster iam role is used to access s3, + // please make sure the cluster iam role in `instance_profile_arn` has + // permission to write data to the s3 destination. S3 *S3StorageInfo `json:"s3,omitempty"` - // destination needs to be provided. e.g. `{ "volumes" : { "destination" : - // "/Volumes/my-init.sh" } }` + // destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" + // : \"/Volumes/my-init.sh\" } }` Volumes *VolumesStorageInfo `json:"volumes,omitempty"` - // destination needs to be provided. e.g. `{ "workspace" : { "destination" : - // "/Users/user1@databricks.com/my-init.sh" } }` + // destination needs to be provided, e.g. `{ "workspace": { "destination": + // "/cluster-init-scripts/setup-datadog.sh" } }` Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"` } type InitScriptInfoAndExecutionDetails struct { - // Details about the script - ExecutionDetails *InitScriptExecutionDetails `json:"execution_details,omitempty"` - // The script - Script *InitScriptInfo `json:"script,omitempty"` + // destination needs to be provided, e.g. + // `abfss://@.dfs.core.windows.net/` + Abfss *Adlsgen2Info `json:"abfss,omitempty"` + // destination needs to be provided. e.g. `{ "dbfs": { "destination" : + // "dbfs:/home/cluster_log" } }` + Dbfs *DbfsStorageInfo `json:"dbfs,omitempty"` + // Additional details regarding errors (such as a file not found message if + // the status is FAILED_FETCH). This field should only be used to provide + // *additional* information to the status field, not duplicate it. + ErrorMessage string `json:"error_message,omitempty"` + // The number duration of the script execution in seconds + ExecutionDurationSeconds int `json:"execution_duration_seconds,omitempty"` + // destination needs to be provided, e.g. 
`{ "file": { "destination": + // "file:/my/local/file.sh" } }` + File *LocalFileInfo `json:"file,omitempty"` + // destination needs to be provided, e.g. `{ "gcs": { "destination": + // "gs://my-bucket/file.sh" } }` + Gcs *GcsStorageInfo `json:"gcs,omitempty"` + // destination and either the region or endpoint need to be provided. e.g. + // `{ \"s3\": { \"destination\": \"s3://cluster_log_bucket/prefix\", + // \"region\": \"us-west-2\" } }` Cluster iam role is used to access s3, + // please make sure the cluster iam role in `instance_profile_arn` has + // permission to write data to the s3 destination. + S3 *S3StorageInfo `json:"s3,omitempty"` + // The current status of the script + Status InitScriptExecutionDetailsInitScriptExecutionStatus `json:"status,omitempty"` + // destination needs to be provided. e.g. `{ \"volumes\" : { \"destination\" + // : \"/Volumes/my-init.sh\" } }` + Volumes *VolumesStorageInfo `json:"volumes,omitempty"` + // destination needs to be provided, e.g. `{ "workspace": { "destination": + // "/cluster-init-scripts/setup-datadog.sh" } }` + Workspace *WorkspaceStorageInfo `json:"workspace,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *InitScriptInfoAndExecutionDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s InitScriptInfoAndExecutionDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type InstallLibraries struct { @@ -4192,8 +4244,8 @@ type ListAllClusterLibraryStatusesResponse struct { } type ListAvailableZonesResponse struct { - // The availability zone if no `zone_id` is provided in the cluster creation - // request. + // The availability zone if no ``zone_id`` is provided in the cluster + // creation request. DefaultZone string `json:"default_zone,omitempty"` // The list of available zones (e.g., ['us-west-2c', 'us-east-2']). 
Zones []string `json:"zones,omitempty"` @@ -4312,7 +4364,6 @@ func (s ListClustersRequest) MarshalJSON() ([]byte, error) { } type ListClustersResponse struct { - // Clusters []ClusterDetails `json:"clusters,omitempty"` // This field represents the pagination token to retrieve the next page of // results. If the value is "", it means no further results for the request. @@ -4342,7 +4393,6 @@ type ListClustersSortBy struct { Field ListClustersSortByField `json:"field,omitempty" url:"field,omitempty"` } -// The direction to sort by. type ListClustersSortByDirection string const ListClustersSortByDirectionAsc ListClustersSortByDirection = `ASC` @@ -4370,9 +4420,6 @@ func (f *ListClustersSortByDirection) Type() string { return "ListClustersSortByDirection" } -// The sorting criteria. By default, clusters are sorted by 3 columns from -// highest to lowest precedence: cluster state, pinned or unpinned, then cluster -// name. type ListClustersSortByField string const ListClustersSortByFieldClusterName ListClustersSortByField = `CLUSTER_NAME` @@ -4486,7 +4533,6 @@ func (f *ListSortColumn) Type() string { return "ListSortColumn" } -// A generic ordering enum for list-based queries. type ListSortOrder string const ListSortOrderAsc ListSortOrder = `ASC` @@ -4520,9 +4566,8 @@ type LocalFileInfo struct { } type LogAnalyticsInfo struct { - // LogAnalyticsPrimaryKey string `json:"log_analytics_primary_key,omitempty"` - // + LogAnalyticsWorkspaceId string `json:"log_analytics_workspace_id,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -4536,6 +4581,7 @@ func (s LogAnalyticsInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The log delivery status type LogSyncStatus struct { // The timestamp of last attempt. If the last attempt fails, // `last_exception` will contain the exception in the last attempt. 
@@ -4579,15 +4625,21 @@ func (s MavenLibrary) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// This structure embodies the machine type that hosts spark containers Note: +// this should be an internal data structure for now It is defined in proto in +// case we want to send it over the wire in the future (which is likely) type NodeInstanceType struct { - InstanceTypeId string `json:"instance_type_id,omitempty"` - + // Unique identifier across instance types + InstanceTypeId string `json:"instance_type_id"` + // Size of the individual local disks attached to this instance (i.e. per + // local disk). LocalDiskSizeGb int `json:"local_disk_size_gb,omitempty"` - + // Number of local disks that are present on this instance. LocalDisks int `json:"local_disks,omitempty"` - + // Size of the individual local nvme disks attached to this instance (i.e. + // per local disk). LocalNvmeDiskSizeGb int `json:"local_nvme_disk_size_gb,omitempty"` - + // Number of local nvme disks that are present on this instance. LocalNvmeDisks int `json:"local_nvme_disks,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -4601,11 +4653,16 @@ func (s NodeInstanceType) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// A description of a Spark node type including both the dimensions of the node +// and the instance type on which it will be hosted. type NodeType struct { - Category string `json:"category,omitempty"` + // A descriptive category for this node type. Examples include "Memory + // Optimized" and "Compute Optimized". + Category string `json:"category"` // A string description associated with this node type, e.g., "r3.xlarge". Description string `json:"description"` - + // An optional hint at the display order of node types in the UI. Within a + // node type category, lowest numbers come first. DisplayOrder int `json:"display_order,omitempty"` // An identifier for the type of hardware that this node runs on, e.g., // "r3.2xlarge" in AWS. 
@@ -4616,17 +4673,17 @@ type NodeType struct { // AWS specific, whether this instance supports encryption in transit, used // for hipaa and pci workloads. IsEncryptedInTransit bool `json:"is_encrypted_in_transit,omitempty"` - + // Whether this is an Arm-based instance. IsGraviton bool `json:"is_graviton,omitempty"` - + // Whether this node is hidden from presentation in the UI. IsHidden bool `json:"is_hidden,omitempty"` - + // Whether this node comes with IO cache enabled by default. IsIoCacheEnabled bool `json:"is_io_cache_enabled,omitempty"` // Memory (in MB) available for this node type. MemoryMb int `json:"memory_mb"` - + // A collection of node type info reported by the cloud provider NodeInfo *CloudProviderNodeInfo `json:"node_info,omitempty"` - + // The NodeInstanceType object corresponding to instance_type_id NodeInstanceType *NodeInstanceType `json:"node_instance_type,omitempty"` // Unique identifier for this node type. NodeTypeId string `json:"node_type_id"` @@ -4634,21 +4691,20 @@ type NodeType struct { // fractional, e.g., 2.5 cores, if the the number of cores on a machine // instance is not divisible by the number of Spark nodes on that machine. NumCores float64 `json:"num_cores"` - + // Number of GPUs available for this node type. NumGpus int `json:"num_gpus,omitempty"` PhotonDriverCapable bool `json:"photon_driver_capable,omitempty"` PhotonWorkerCapable bool `json:"photon_worker_capable,omitempty"` - + // Whether this node type support cluster tags. SupportClusterTags bool `json:"support_cluster_tags,omitempty"` - + // Whether this node type support EBS volumes. EBS volumes is disabled for + // node types that we could place multiple corresponding containers on the + // same hosting instance. SupportEbsVolumes bool `json:"support_ebs_volumes,omitempty"` - + // Whether this node type supports port forwarding. 
SupportPortForwarding bool `json:"support_port_forwarding,omitempty"` - // Indicates if this node type can be used for an instance pool or cluster - // with elastic disk enabled. This is true for most node types. - SupportsElasticDisk bool `json:"supports_elastic_disk,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -4687,7 +4743,6 @@ type PermanentDeleteClusterResponse struct { } type PinCluster struct { - // ClusterId string `json:"cluster_id"` } @@ -4859,7 +4914,7 @@ type ResizeClusterResponse struct { type RestartCluster struct { // The cluster to be started. ClusterId string `json:"cluster_id"` - // + RestartUser string `json:"restart_user,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -4943,14 +4998,6 @@ func (s Results) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Determines the cluster's runtime engine, either standard or Photon. -// -// This field is not compatible with legacy `spark_version` values that contain -// `-photon-`. Remove `-photon-` from the `spark_version` and set -// `runtime_engine` to `PHOTON`. -// -// If left unspecified, the runtime engine defaults to standard unless the -// spark_version contains -photon-, in which case Photon will be used. type RuntimeEngine string const RuntimeEngineNull RuntimeEngine = `NULL` @@ -4980,6 +5027,7 @@ func (f *RuntimeEngine) Type() string { return "RuntimeEngine" } +// A storage location in Amazon S3 type S3StorageInfo struct { // (Optional) Set canned access control list for the logs, e.g. // `bucket-owner-full-control`. If `canned_cal` is set, please make sure the @@ -5023,6 +5071,7 @@ func (s S3StorageInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Describes a specific Spark driver or executor. type SparkNode struct { // The private IP address of the host instance. HostPrivateIp string `json:"host_private_ip,omitempty"` @@ -5039,15 +5088,8 @@ type SparkNode struct { // Spark JDBC server on the driver node. 
To communicate with the JDBC // server, traffic must be manually authorized by adding security group // rules to the "worker-unmanaged" security group via the AWS console. - // - // Actually it's the public DNS address of the host instance. PublicDns string `json:"public_dns,omitempty"` // The timestamp (in millisecond) when the Spark node is launched. - // - // The start_timestamp is set right before the container is being launched. - // The timestamp when the container is placed on the ResourceManager, before - // its launch and setup by the NodeDaemon. This timestamp is the same as the - // creation timestamp in the database. StartTimestamp int64 `json:"start_timestamp,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -5061,6 +5103,7 @@ func (s SparkNode) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Attributes specific to AWS for a Spark node. type SparkNodeAwsAttributes struct { // Whether this node is on an Amazon spot instance. IsSpot bool `json:"is_spot,omitempty"` @@ -5105,7 +5148,13 @@ type StartCluster struct { type StartClusterResponse struct { } -// Current state of the cluster. +// The state of a Cluster. 
The current allowable state transitions are as +// follows: +// +// - `PENDING` -> `RUNNING` - `PENDING` -> `TERMINATING` - `RUNNING` -> +// `RESIZING` - `RUNNING` -> `RESTARTING` - `RUNNING` -> `TERMINATING` - +// `RESTARTING` -> `RUNNING` - `RESTARTING` -> `TERMINATING` - `RESIZING` -> +// `RUNNING` - `RESIZING` -> `TERMINATING` - `TERMINATING` -> `TERMINATED` type State string const StateError State = `ERROR` @@ -5155,23 +5204,49 @@ type TerminationReason struct { Type TerminationReasonType `json:"type,omitempty"` } -// status code indicating why the cluster was terminated +// The status code indicating why the cluster was terminated type TerminationReasonCode string const TerminationReasonCodeAbuseDetected TerminationReasonCode = `ABUSE_DETECTED` +const TerminationReasonCodeAccessTokenFailure TerminationReasonCode = `ACCESS_TOKEN_FAILURE` + +const TerminationReasonCodeAllocationTimeout TerminationReasonCode = `ALLOCATION_TIMEOUT` + +const TerminationReasonCodeAllocationTimeoutNodeDaemonNotReady TerminationReasonCode = `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY` + +const TerminationReasonCodeAllocationTimeoutNoHealthyClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS` + +const TerminationReasonCodeAllocationTimeoutNoMatchedClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS` + +const TerminationReasonCodeAllocationTimeoutNoReadyClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS` + +const TerminationReasonCodeAllocationTimeoutNoUnallocatedClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS` + +const TerminationReasonCodeAllocationTimeoutNoWarmedUpClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS` + const TerminationReasonCodeAttachProjectFailure TerminationReasonCode = `ATTACH_PROJECT_FAILURE` const TerminationReasonCodeAwsAuthorizationFailure TerminationReasonCode = `AWS_AUTHORIZATION_FAILURE` +const TerminationReasonCodeAwsInaccessibleKmsKeyFailure 
TerminationReasonCode = `AWS_INACCESSIBLE_KMS_KEY_FAILURE` + +const TerminationReasonCodeAwsInstanceProfileUpdateFailure TerminationReasonCode = `AWS_INSTANCE_PROFILE_UPDATE_FAILURE` + const TerminationReasonCodeAwsInsufficientFreeAddressesInSubnetFailure TerminationReasonCode = `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE` const TerminationReasonCodeAwsInsufficientInstanceCapacityFailure TerminationReasonCode = `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE` +const TerminationReasonCodeAwsInvalidKeyPair TerminationReasonCode = `AWS_INVALID_KEY_PAIR` + +const TerminationReasonCodeAwsInvalidKmsKeyState TerminationReasonCode = `AWS_INVALID_KMS_KEY_STATE` + const TerminationReasonCodeAwsMaxSpotInstanceCountExceededFailure TerminationReasonCode = `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE` const TerminationReasonCodeAwsRequestLimitExceeded TerminationReasonCode = `AWS_REQUEST_LIMIT_EXCEEDED` +const TerminationReasonCodeAwsResourceQuotaExceeded TerminationReasonCode = `AWS_RESOURCE_QUOTA_EXCEEDED` + const TerminationReasonCodeAwsUnsupportedFailure TerminationReasonCode = `AWS_UNSUPPORTED_FAILURE` const TerminationReasonCodeAzureByokKeyPermissionFailure TerminationReasonCode = `AZURE_BYOK_KEY_PERMISSION_FAILURE` @@ -5182,6 +5257,8 @@ const TerminationReasonCodeAzureInvalidDeploymentTemplate TerminationReasonCode const TerminationReasonCodeAzureOperationNotAllowedException TerminationReasonCode = `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION` +const TerminationReasonCodeAzurePackedDeploymentPartialFailure TerminationReasonCode = `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE` + const TerminationReasonCodeAzureQuotaExceededException TerminationReasonCode = `AZURE_QUOTA_EXCEEDED_EXCEPTION` const TerminationReasonCodeAzureResourceManagerThrottling TerminationReasonCode = `AZURE_RESOURCE_MANAGER_THROTTLING` @@ -5198,36 +5275,110 @@ const TerminationReasonCodeBootstrapTimeout TerminationReasonCode = `BOOTSTRAP_T const TerminationReasonCodeBootstrapTimeoutCloudProviderException 
TerminationReasonCode = `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION` +const TerminationReasonCodeBootstrapTimeoutDueToMisconfig TerminationReasonCode = `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG` + +const TerminationReasonCodeBudgetPolicyLimitEnforcementActivated TerminationReasonCode = `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED` + +const TerminationReasonCodeBudgetPolicyResolutionFailure TerminationReasonCode = `BUDGET_POLICY_RESOLUTION_FAILURE` + +const TerminationReasonCodeCloudAccountSetupFailure TerminationReasonCode = `CLOUD_ACCOUNT_SETUP_FAILURE` + +const TerminationReasonCodeCloudOperationCancelled TerminationReasonCode = `CLOUD_OPERATION_CANCELLED` + const TerminationReasonCodeCloudProviderDiskSetupFailure TerminationReasonCode = `CLOUD_PROVIDER_DISK_SETUP_FAILURE` +const TerminationReasonCodeCloudProviderInstanceNotLaunched TerminationReasonCode = `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED` + const TerminationReasonCodeCloudProviderLaunchFailure TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE` +const TerminationReasonCodeCloudProviderLaunchFailureDueToMisconfig TerminationReasonCode = `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG` + const TerminationReasonCodeCloudProviderResourceStockout TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT` +const TerminationReasonCodeCloudProviderResourceStockoutDueToMisconfig TerminationReasonCode = `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG` + const TerminationReasonCodeCloudProviderShutdown TerminationReasonCode = `CLOUD_PROVIDER_SHUTDOWN` +const TerminationReasonCodeClusterOperationThrottled TerminationReasonCode = `CLUSTER_OPERATION_THROTTLED` + +const TerminationReasonCodeClusterOperationTimeout TerminationReasonCode = `CLUSTER_OPERATION_TIMEOUT` + const TerminationReasonCodeCommunicationLost TerminationReasonCode = `COMMUNICATION_LOST` const TerminationReasonCodeContainerLaunchFailure TerminationReasonCode = `CONTAINER_LAUNCH_FAILURE` const TerminationReasonCodeControlPlaneRequestFailure 
TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE` +const TerminationReasonCodeControlPlaneRequestFailureDueToMisconfig TerminationReasonCode = `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG` + const TerminationReasonCodeDatabaseConnectionFailure TerminationReasonCode = `DATABASE_CONNECTION_FAILURE` +const TerminationReasonCodeDataAccessConfigChanged TerminationReasonCode = `DATA_ACCESS_CONFIG_CHANGED` + const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_COMPONENT_UNHEALTHY` +const TerminationReasonCodeDisasterRecoveryReplication TerminationReasonCode = `DISASTER_RECOVERY_REPLICATION` + const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` +const TerminationReasonCodeDriverEviction TerminationReasonCode = `DRIVER_EVICTION` + +const TerminationReasonCodeDriverLaunchTimeout TerminationReasonCode = `DRIVER_LAUNCH_TIMEOUT` + +const TerminationReasonCodeDriverNodeUnreachable TerminationReasonCode = `DRIVER_NODE_UNREACHABLE` + +const TerminationReasonCodeDriverOutOfDisk TerminationReasonCode = `DRIVER_OUT_OF_DISK` + +const TerminationReasonCodeDriverOutOfMemory TerminationReasonCode = `DRIVER_OUT_OF_MEMORY` + +const TerminationReasonCodeDriverPodCreationFailure TerminationReasonCode = `DRIVER_POD_CREATION_FAILURE` + +const TerminationReasonCodeDriverUnexpectedFailure TerminationReasonCode = `DRIVER_UNEXPECTED_FAILURE` + const TerminationReasonCodeDriverUnreachable TerminationReasonCode = `DRIVER_UNREACHABLE` const TerminationReasonCodeDriverUnresponsive TerminationReasonCode = `DRIVER_UNRESPONSIVE` +const TerminationReasonCodeDynamicSparkConfSizeExceeded TerminationReasonCode = `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED` + +const TerminationReasonCodeEosSparkImage TerminationReasonCode = `EOS_SPARK_IMAGE` + const TerminationReasonCodeExecutionComponentUnhealthy TerminationReasonCode = `EXECUTION_COMPONENT_UNHEALTHY` +const TerminationReasonCodeExecutorPodUnscheduled TerminationReasonCode = 
`EXECUTOR_POD_UNSCHEDULED` + +const TerminationReasonCodeGcpApiRateQuotaExceeded TerminationReasonCode = `GCP_API_RATE_QUOTA_EXCEEDED` + +const TerminationReasonCodeGcpForbidden TerminationReasonCode = `GCP_FORBIDDEN` + +const TerminationReasonCodeGcpIamTimeout TerminationReasonCode = `GCP_IAM_TIMEOUT` + +const TerminationReasonCodeGcpInaccessibleKmsKeyFailure TerminationReasonCode = `GCP_INACCESSIBLE_KMS_KEY_FAILURE` + +const TerminationReasonCodeGcpInsufficientCapacity TerminationReasonCode = `GCP_INSUFFICIENT_CAPACITY` + +const TerminationReasonCodeGcpIpSpaceExhausted TerminationReasonCode = `GCP_IP_SPACE_EXHAUSTED` + +const TerminationReasonCodeGcpKmsKeyPermissionDenied TerminationReasonCode = `GCP_KMS_KEY_PERMISSION_DENIED` + +const TerminationReasonCodeGcpNotFound TerminationReasonCode = `GCP_NOT_FOUND` + const TerminationReasonCodeGcpQuotaExceeded TerminationReasonCode = `GCP_QUOTA_EXCEEDED` +const TerminationReasonCodeGcpResourceQuotaExceeded TerminationReasonCode = `GCP_RESOURCE_QUOTA_EXCEEDED` + +const TerminationReasonCodeGcpServiceAccountAccessDenied TerminationReasonCode = `GCP_SERVICE_ACCOUNT_ACCESS_DENIED` + const TerminationReasonCodeGcpServiceAccountDeleted TerminationReasonCode = `GCP_SERVICE_ACCOUNT_DELETED` +const TerminationReasonCodeGcpServiceAccountNotFound TerminationReasonCode = `GCP_SERVICE_ACCOUNT_NOT_FOUND` + +const TerminationReasonCodeGcpSubnetNotReady TerminationReasonCode = `GCP_SUBNET_NOT_READY` + +const TerminationReasonCodeGcpTrustedImageProjectsViolated TerminationReasonCode = `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED` + +const TerminationReasonCodeGkeBasedClusterTermination TerminationReasonCode = `GKE_BASED_CLUSTER_TERMINATION` + const TerminationReasonCodeGlobalInitScriptFailure TerminationReasonCode = `GLOBAL_INIT_SCRIPT_FAILURE` const TerminationReasonCodeHiveMetastoreProvisioningFailure TerminationReasonCode = `HIVE_METASTORE_PROVISIONING_FAILURE` @@ -5236,18 +5387,36 @@ const TerminationReasonCodeImagePullPermissionDenied 
TerminationReasonCode = `IM const TerminationReasonCodeInactivity TerminationReasonCode = `INACTIVITY` +const TerminationReasonCodeInitContainerNotFinished TerminationReasonCode = `INIT_CONTAINER_NOT_FINISHED` + const TerminationReasonCodeInitScriptFailure TerminationReasonCode = `INIT_SCRIPT_FAILURE` const TerminationReasonCodeInstancePoolClusterFailure TerminationReasonCode = `INSTANCE_POOL_CLUSTER_FAILURE` +const TerminationReasonCodeInstancePoolMaxCapacityReached TerminationReasonCode = `INSTANCE_POOL_MAX_CAPACITY_REACHED` + +const TerminationReasonCodeInstancePoolNotFound TerminationReasonCode = `INSTANCE_POOL_NOT_FOUND` + const TerminationReasonCodeInstanceUnreachable TerminationReasonCode = `INSTANCE_UNREACHABLE` +const TerminationReasonCodeInstanceUnreachableDueToMisconfig TerminationReasonCode = `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG` + +const TerminationReasonCodeInternalCapacityFailure TerminationReasonCode = `INTERNAL_CAPACITY_FAILURE` + const TerminationReasonCodeInternalError TerminationReasonCode = `INTERNAL_ERROR` const TerminationReasonCodeInvalidArgument TerminationReasonCode = `INVALID_ARGUMENT` +const TerminationReasonCodeInvalidAwsParameter TerminationReasonCode = `INVALID_AWS_PARAMETER` + +const TerminationReasonCodeInvalidInstancePlacementProtocol TerminationReasonCode = `INVALID_INSTANCE_PLACEMENT_PROTOCOL` + const TerminationReasonCodeInvalidSparkImage TerminationReasonCode = `INVALID_SPARK_IMAGE` +const TerminationReasonCodeInvalidWorkerImageFailure TerminationReasonCode = `INVALID_WORKER_IMAGE_FAILURE` + +const TerminationReasonCodeInPenaltyBox TerminationReasonCode = `IN_PENALTY_BOX` + const TerminationReasonCodeIpExhaustionFailure TerminationReasonCode = `IP_EXHAUSTION_FAILURE` const TerminationReasonCodeJobFinished TerminationReasonCode = `JOB_FINISHED` @@ -5256,28 +5425,48 @@ const TerminationReasonCodeK8sAutoscalingFailure TerminationReasonCode = `K8S_AU const TerminationReasonCodeK8sDbrClusterLaunchTimeout TerminationReasonCode = 
`K8S_DBR_CLUSTER_LAUNCH_TIMEOUT` +const TerminationReasonCodeLazyAllocationTimeout TerminationReasonCode = `LAZY_ALLOCATION_TIMEOUT` + +const TerminationReasonCodeMaintenanceMode TerminationReasonCode = `MAINTENANCE_MODE` + const TerminationReasonCodeMetastoreComponentUnhealthy TerminationReasonCode = `METASTORE_COMPONENT_UNHEALTHY` const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEPHOS_RESOURCE_MANAGEMENT` +const TerminationReasonCodeNetvisorSetupTimeout TerminationReasonCode = `NETVISOR_SETUP_TIMEOUT` + const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE` const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` +const TerminationReasonCodeNoMatchedK8s TerminationReasonCode = `NO_MATCHED_K8S` + +const TerminationReasonCodeNoMatchedK8sTestingTag TerminationReasonCode = `NO_MATCHED_K8S_TESTING_TAG` + const TerminationReasonCodeNpipTunnelSetupFailure TerminationReasonCode = `NPIP_TUNNEL_SETUP_FAILURE` const TerminationReasonCodeNpipTunnelTokenFailure TerminationReasonCode = `NPIP_TUNNEL_TOKEN_FAILURE` +const TerminationReasonCodePodAssignmentFailure TerminationReasonCode = `POD_ASSIGNMENT_FAILURE` + +const TerminationReasonCodePodSchedulingFailure TerminationReasonCode = `POD_SCHEDULING_FAILURE` + const TerminationReasonCodeRequestRejected TerminationReasonCode = `REQUEST_REJECTED` const TerminationReasonCodeRequestThrottled TerminationReasonCode = `REQUEST_THROTTLED` +const TerminationReasonCodeResourceUsageBlocked TerminationReasonCode = `RESOURCE_USAGE_BLOCKED` + +const TerminationReasonCodeSecretCreationFailure TerminationReasonCode = `SECRET_CREATION_FAILURE` + const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` const TerminationReasonCodeSelfBootstrapFailure 
TerminationReasonCode = `SELF_BOOTSTRAP_FAILURE` +const TerminationReasonCodeServerlessLongRunningTerminated TerminationReasonCode = `SERVERLESS_LONG_RUNNING_TERMINATED` + const TerminationReasonCodeSkippedSlowNodes TerminationReasonCode = `SKIPPED_SLOW_NODES` const TerminationReasonCodeSlowImageDownload TerminationReasonCode = `SLOW_IMAGE_DOWNLOAD` @@ -5286,12 +5475,24 @@ const TerminationReasonCodeSparkError TerminationReasonCode = `SPARK_ERROR` const TerminationReasonCodeSparkImageDownloadFailure TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_FAILURE` +const TerminationReasonCodeSparkImageDownloadThrottled TerminationReasonCode = `SPARK_IMAGE_DOWNLOAD_THROTTLED` + +const TerminationReasonCodeSparkImageNotFound TerminationReasonCode = `SPARK_IMAGE_NOT_FOUND` + const TerminationReasonCodeSparkStartupFailure TerminationReasonCode = `SPARK_STARTUP_FAILURE` const TerminationReasonCodeSpotInstanceTermination TerminationReasonCode = `SPOT_INSTANCE_TERMINATION` +const TerminationReasonCodeSshBootstrapFailure TerminationReasonCode = `SSH_BOOTSTRAP_FAILURE` + const TerminationReasonCodeStorageDownloadFailure TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE` +const TerminationReasonCodeStorageDownloadFailureDueToMisconfig TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG` + +const TerminationReasonCodeStorageDownloadFailureSlow TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE_SLOW` + +const TerminationReasonCodeStorageDownloadFailureThrottled TerminationReasonCode = `STORAGE_DOWNLOAD_FAILURE_THROTTLED` + const TerminationReasonCodeStsClientSetupFailure TerminationReasonCode = `STS_CLIENT_SETUP_FAILURE` const TerminationReasonCodeSubnetExhaustedFailure TerminationReasonCode = `SUBNET_EXHAUSTED_FAILURE` @@ -5302,12 +5503,16 @@ const TerminationReasonCodeTrialExpired TerminationReasonCode = `TRIAL_EXPIRED` const TerminationReasonCodeUnexpectedLaunchFailure TerminationReasonCode = `UNEXPECTED_LAUNCH_FAILURE` +const TerminationReasonCodeUnexpectedPodRecreation 
TerminationReasonCode = `UNEXPECTED_POD_RECREATION` + const TerminationReasonCodeUnknown TerminationReasonCode = `UNKNOWN` const TerminationReasonCodeUnsupportedInstanceType TerminationReasonCode = `UNSUPPORTED_INSTANCE_TYPE` const TerminationReasonCodeUpdateInstanceProfileFailure TerminationReasonCode = `UPDATE_INSTANCE_PROFILE_FAILURE` +const TerminationReasonCodeUserInitiatedVmTermination TerminationReasonCode = `USER_INITIATED_VM_TERMINATION` + const TerminationReasonCodeUserRequest TerminationReasonCode = `USER_REQUEST` const TerminationReasonCodeWorkerSetupFailure TerminationReasonCode = `WORKER_SETUP_FAILURE` @@ -5316,6 +5521,8 @@ const TerminationReasonCodeWorkspaceCancelledError TerminationReasonCode = `WORK const TerminationReasonCodeWorkspaceConfigurationError TerminationReasonCode = `WORKSPACE_CONFIGURATION_ERROR` +const TerminationReasonCodeWorkspaceUpdate TerminationReasonCode = `WORKSPACE_UPDATE` + // String representation for [fmt.Print] func (f *TerminationReasonCode) String() string { return string(*f) @@ -5324,11 +5531,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, 
`CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_SHUTDOWN`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `DATABASE_CONNECTION_FAILURE`, `DBFS_COMPONENT_UNHEALTHY`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `EXECUTION_COMPONENT_UNHEALTHY`, `GCP_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_UNREACHABLE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_SPARK_IMAGE`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `STORAGE_DOWNLOAD_FAILURE`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, 
`AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_FORBIDDEN`, 
`GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, 
`STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_SHUTDOWN", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "DATABASE_CONNECTION_FAILURE", "DBFS_COMPONENT_UNHEALTHY", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "EXECUTION_COMPONENT_UNHEALTHY", "GCP_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_DELETED", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_UNREACHABLE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_SPARK_IMAGE", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", 
"METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "STORAGE_DOWNLOAD_FAILURE", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", 
"BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", 
"INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } @@ -5380,7 +5587,6 @@ type UninstallLibrariesResponse struct { } type UnpinCluster struct { - // ClusterId string `json:"cluster_id"` } @@ -5392,11 +5598,20 @@ type UpdateCluster struct { Cluster *UpdateClusterResource `json:"cluster,omitempty"` // ID of the cluster. 
ClusterId string `json:"cluster_id"` - // Specifies which fields of the cluster will be updated. This is required - // in the POST request. The update mask should be supplied as a single - // string. To specify multiple fields, separate them with commas (no - // spaces). To delete a field from a cluster configuration, add it to the - // `update_mask` string but omit it from the `cluster` object. + // Used to specify which cluster attributes and size fields to update. See + // https://google.aip.dev/161 for more details. + // + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. UpdateMask string `json:"update_mask"` } @@ -5467,7 +5682,7 @@ type UpdateClusterResource struct { // Passthrough on standard clusters. * `LEGACY_SINGLE_USER_STANDARD`: This // mode provides a way that doesn’t have UC nor passthrough enabled. DataSecurityMode DataSecurityMode `json:"data_security_mode,omitempty"` - + // Custom docker image BYOC DockerImage *DockerImage `json:"docker_image,omitempty"` // The optional ID of the instance pool for the driver of the cluster // belongs. The pool cluster uses the instance pool with id @@ -5476,6 +5691,11 @@ type UpdateClusterResource struct { // The node type of the Spark driver. Note that this field is optional; if // unset, the driver node type will be set as the same value as // `node_type_id` defined above. 
+ // + // This field, along with node_type_id, should not be set if + // virtual_cluster_size is set. If both driver_node_type_id, node_type_id, + // and virtual_cluster_size are specified, driver_node_type_id and + // node_type_id take precedence. DriverNodeTypeId string `json:"driver_node_type_id,omitempty"` // Autoscaling Local Storage: when enabled, this cluster will dynamically // acquire additional disk space when its Spark workers are running low on @@ -5581,7 +5801,7 @@ type UpdateClusterResource struct { // this field `use_ml_runtime`, and whether `node_type_id` is gpu node or // not. UseMlRuntime bool `json:"use_ml_runtime,omitempty"` - + // Cluster Attributes showing for clusters workload types. WorkloadType *WorkloadType `json:"workload_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -5601,19 +5821,22 @@ type UpdateClusterResponse struct { type UpdateResponse struct { } +// A storage location back by UC Volumes. type VolumesStorageInfo struct { - // Unity Catalog volumes file destination, e.g. - // `/Volumes/catalog/schema/volume/dir/file` + // UC Volumes destination, e.g. + // `/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh` or + // `dbfs:/Volumes/catalog/schema/vol1/init-scripts/setup-datadog.sh` Destination string `json:"destination"` } +// Cluster Attributes showing for clusters workload types. type WorkloadType struct { // defined what type of clients can use the cluster. E.g. Notebooks, Jobs Clients ClientsTypes `json:"clients"` } +// A storage location in Workspace Filesystem (WSFS) type WorkspaceStorageInfo struct { - // workspace files destination, e.g. - // `/Users/user1@databricks.com/my-init.sh` + // wsfs destination, e.g. 
`workspace:/cluster-init-scripts/setup-datadog.sh` Destination string `json:"destination"` } diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 5355772f0..b12d265ef 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -46,6 +46,13 @@ type GenieInterface interface { // Execute the SQL query in the message. ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) + // Generate full query result download. + // + // Initiate full SQL query result download and obtain a transient ID for + // tracking the download progress. This call initiates a new SQL execution to + // generate the query result. + GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) + // Get conversation message. // // Get message from conversation. diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index 8bff281d2..0fc08d294 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -48,6 +48,16 @@ func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecut return &genieGetMessageQueryResultResponse, err } +func (a *genieImpl) GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) { + var genieGenerateDownloadFullQueryResultResponse GenieGenerateDownloadFullQueryResultResponse + path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/generate-download", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &genieGenerateDownloadFullQueryResultResponse) + return 
&genieGenerateDownloadFullQueryResultResponse, err +} + func (a *genieImpl) GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) { var genieMessage GenieMessage path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v", request.SpaceId, request.ConversationId, request.MessageId) diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index a53486dc4..983d37125 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -31,6 +31,13 @@ type GenieService interface { // Execute the SQL query in the message. ExecuteMessageQuery(ctx context.Context, request GenieExecuteMessageQueryRequest) (*GenieGetMessageQueryResultResponse, error) + // Generate full query result download. + // + // Initiate full SQL query result download and obtain a transient ID for + // tracking the download progress. This call initiates a new SQL execution + // to generate the query result. + GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) + // Get conversation message. // // Get message from conversation. 
diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 64074e89d..3482b74a0 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -304,6 +304,38 @@ type GenieExecuteMessageQueryRequest struct { SpaceId string `json:"-" url:"-"` } +// Generate full query result download +type GenieGenerateDownloadFullQueryResultRequest struct { + // Attachment ID + AttachmentId string `json:"-" url:"-"` + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Space ID + SpaceId string `json:"-" url:"-"` +} + +type GenieGenerateDownloadFullQueryResultResponse struct { + // Error message if Genie failed to download the result + Error string `json:"error,omitempty"` + // Download result status + Status MessageStatus `json:"status,omitempty"` + // Transient Statement ID. Use this ID to track the download request in + // subsequent polling calls + TransientStatementId string `json:"transient_statement_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GenieGenerateDownloadFullQueryResultResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GenieGenerateDownloadFullQueryResultResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get conversation message type GenieGetConversationMessageRequest struct { // The ID associated with the target conversation. 
@@ -735,6 +767,8 @@ const MessageErrorTypeFunctionArgumentsInvalidTypeException MessageErrorType = ` const MessageErrorTypeFunctionCallMissingParameterException MessageErrorType = `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION` +const MessageErrorTypeGeneratedSqlQueryTooLongException MessageErrorType = `GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION` + const MessageErrorTypeGenericChatCompletionException MessageErrorType = `GENERIC_CHAT_COMPLETION_EXCEPTION` const MessageErrorTypeGenericChatCompletionServiceException MessageErrorType = `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION` @@ -763,6 +797,8 @@ const MessageErrorTypeMessageDeletedWhileExecutingException MessageErrorType = ` const MessageErrorTypeMessageUpdatedWhileExecutingException MessageErrorType = `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION` +const MessageErrorTypeMissingSqlQueryException MessageErrorType = `MISSING_SQL_QUERY_EXCEPTION` + const MessageErrorTypeNoDeploymentsAvailableToWorkspace MessageErrorType = `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE` const MessageErrorTypeNoQueryToVisualizeException MessageErrorType = `NO_QUERY_TO_VISUALIZE_EXCEPTION` @@ -803,11 +839,11 @@ func (f *MessageErrorType) String() string { // Set raw string value and validate it against allowed values func (f *MessageErrorType) Set(v string) error { switch v { - case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, 
`ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, `INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: + case `BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION`, `CHAT_COMPLETION_CLIENT_EXCEPTION`, `CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION`, `CHAT_COMPLETION_NETWORK_EXCEPTION`, `CONTENT_FILTER_EXCEPTION`, `CONTEXT_EXCEEDED_EXCEPTION`, `COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION`, `COULD_NOT_GET_UC_SCHEMA_EXCEPTION`, `DEPLOYMENT_NOT_FOUND_EXCEPTION`, `FUNCTIONS_NOT_AVAILABLE_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION`, `FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION`, `FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION`, `GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION`, `GENERIC_CHAT_COMPLETION_EXCEPTION`, `GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION`, `GENERIC_SQL_EXEC_API_CALL_EXCEPTION`, `ILLEGAL_PARAMETER_DEFINITION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION`, `INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION`, `INVALID_CHAT_COMPLETION_JSON_EXCEPTION`, `INVALID_COMPLETION_REQUEST_EXCEPTION`, `INVALID_FUNCTION_CALL_EXCEPTION`, 
`INVALID_TABLE_IDENTIFIER_EXCEPTION`, `LOCAL_CONTEXT_EXCEEDED_EXCEPTION`, `MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION`, `MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION`, `MISSING_SQL_QUERY_EXCEPTION`, `NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE`, `NO_QUERY_TO_VISUALIZE_EXCEPTION`, `NO_TABLES_TO_QUERY_EXCEPTION`, `RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION`, `RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION`, `REPLY_PROCESS_TIMEOUT_EXCEPTION`, `RETRYABLE_PROCESSING_EXCEPTION`, `SQL_EXECUTION_EXCEPTION`, `STOP_PROCESS_DUE_TO_AUTO_REGENERATE`, `TABLES_MISSING_EXCEPTION`, `TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION`, `TOO_MANY_TABLES_EXCEPTION`, `UNEXPECTED_REPLY_PROCESS_EXCEPTION`, `UNKNOWN_AI_MODEL`, `WAREHOUSE_ACCESS_MISSING_EXCEPTION`, `WAREHOUSE_NOT_FOUND_EXCEPTION`: *f = MessageErrorType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", 
"MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", "RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) + return fmt.Errorf(`value "%s" is not one of "BLOCK_MULTIPLE_EXECUTIONS_EXCEPTION", "CHAT_COMPLETION_CLIENT_EXCEPTION", "CHAT_COMPLETION_CLIENT_TIMEOUT_EXCEPTION", "CHAT_COMPLETION_NETWORK_EXCEPTION", "CONTENT_FILTER_EXCEPTION", "CONTEXT_EXCEEDED_EXCEPTION", "COULD_NOT_GET_MODEL_DEPLOYMENTS_EXCEPTION", "COULD_NOT_GET_UC_SCHEMA_EXCEPTION", "DEPLOYMENT_NOT_FOUND_EXCEPTION", "FUNCTIONS_NOT_AVAILABLE_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_JSON_EXCEPTION", "FUNCTION_ARGUMENTS_INVALID_TYPE_EXCEPTION", "FUNCTION_CALL_MISSING_PARAMETER_EXCEPTION", "GENERATED_SQL_QUERY_TOO_LONG_EXCEPTION", "GENERIC_CHAT_COMPLETION_EXCEPTION", "GENERIC_CHAT_COMPLETION_SERVICE_EXCEPTION", "GENERIC_SQL_EXEC_API_CALL_EXCEPTION", "ILLEGAL_PARAMETER_DEFINITION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_FUNCTION_EXCEPTION", "INVALID_CERTIFIED_ANSWER_IDENTIFIER_EXCEPTION", "INVALID_CHAT_COMPLETION_JSON_EXCEPTION", "INVALID_COMPLETION_REQUEST_EXCEPTION", "INVALID_FUNCTION_CALL_EXCEPTION", "INVALID_TABLE_IDENTIFIER_EXCEPTION", "LOCAL_CONTEXT_EXCEEDED_EXCEPTION", "MESSAGE_CANCELLED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_DELETED_WHILE_EXECUTING_EXCEPTION", "MESSAGE_UPDATED_WHILE_EXECUTING_EXCEPTION", "MISSING_SQL_QUERY_EXCEPTION", "NO_DEPLOYMENTS_AVAILABLE_TO_WORKSPACE", "NO_QUERY_TO_VISUALIZE_EXCEPTION", "NO_TABLES_TO_QUERY_EXCEPTION", 
"RATE_LIMIT_EXCEEDED_GENERIC_EXCEPTION", "RATE_LIMIT_EXCEEDED_SPECIFIED_WAIT_EXCEPTION", "REPLY_PROCESS_TIMEOUT_EXCEPTION", "RETRYABLE_PROCESSING_EXCEPTION", "SQL_EXECUTION_EXCEPTION", "STOP_PROCESS_DUE_TO_AUTO_REGENERATE", "TABLES_MISSING_EXCEPTION", "TOO_MANY_CERTIFIED_ANSWERS_EXCEPTION", "TOO_MANY_TABLES_EXCEPTION", "UNEXPECTED_REPLY_PROCESS_EXCEPTION", "UNKNOWN_AI_MODEL", "WAREHOUSE_ACCESS_MISSING_EXCEPTION", "WAREHOUSE_NOT_FOUND_EXCEPTION"`, v) } } diff --git a/service/iam/impl.go b/service/iam/impl.go index f8a118799..729cc9784 100755 --- a/service/iam/impl.go +++ b/service/iam/impl.go @@ -194,7 +194,6 @@ func (a *accountGroupsImpl) Patch(ctx context.Context, request PartialUpdate) er path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err @@ -205,7 +204,6 @@ func (a *accountGroupsImpl) Update(ctx context.Context, request Group) error { path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Groups/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err @@ -306,7 +304,6 @@ func (a *accountServicePrincipalsImpl) Patch(ctx context.Context, request Partia path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, 
&patchResponse) return err @@ -317,7 +314,6 @@ func (a *accountServicePrincipalsImpl) Update(ctx context.Context, request Servi path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/ServicePrincipals/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err @@ -418,7 +414,6 @@ func (a *accountUsersImpl) Patch(ctx context.Context, request PartialUpdate) err path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err @@ -429,7 +424,6 @@ func (a *accountUsersImpl) Update(ctx context.Context, request User) error { path := fmt.Sprintf("/api/2.0/accounts/%v/scim/v2/Users/%v", a.client.ConfiguredAccountID(), request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err @@ -545,7 +539,6 @@ func (a *groupsImpl) Patch(ctx context.Context, request PartialUpdate) error { path := fmt.Sprintf("/api/2.0/preview/scim/v2/Groups/%v", request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err @@ -556,7 +549,6 @@ func (a *groupsImpl) Update(ctx context.Context, request Group) error { path := 
fmt.Sprintf("/api/2.0/preview/scim/v2/Groups/%v", request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err @@ -720,7 +712,6 @@ func (a *servicePrincipalsImpl) Patch(ctx context.Context, request PartialUpdate path := fmt.Sprintf("/api/2.0/preview/scim/v2/ServicePrincipals/%v", request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err @@ -731,7 +722,6 @@ func (a *servicePrincipalsImpl) Update(ctx context.Context, request ServicePrinc path := fmt.Sprintf("/api/2.0/preview/scim/v2/ServicePrincipals/%v", request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, path, headers, queryParams, request, &updateResponse) return err @@ -852,7 +842,6 @@ func (a *usersImpl) Patch(ctx context.Context, request PartialUpdate) error { path := fmt.Sprintf("/api/2.0/preview/scim/v2/Users/%v", request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchResponse) return err @@ -874,7 +863,6 @@ func (a *usersImpl) Update(ctx context.Context, request User) error { path := fmt.Sprintf("/api/2.0/preview/scim/v2/Users/%v", request.Id) queryParams := make(map[string]any) headers := make(map[string]string) - headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" err := a.client.Do(ctx, http.MethodPut, 
path, headers, queryParams, request, &updateResponse) return err diff --git a/service/iam/model.go b/service/iam/model.go index 811e50611..adacda464 100755 --- a/service/iam/model.go +++ b/service/iam/model.go @@ -129,25 +129,25 @@ type ConsistencyToken struct { Value string `json:"value"` } -// Delete a group +// Delete a group. type DeleteAccountGroupRequest struct { // Unique ID for a group in the Databricks account. Id string `json:"-" url:"-"` } -// Delete a service principal +// Delete a service principal. type DeleteAccountServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks account. Id string `json:"-" url:"-"` } -// Delete a user +// Delete a user. type DeleteAccountUserRequest struct { // Unique ID for a user in the Databricks account. Id string `json:"-" url:"-"` } -// Delete a group +// Delete a group. type DeleteGroupRequest struct { // Unique ID for a group in the Databricks workspace. Id string `json:"-" url:"-"` @@ -156,13 +156,13 @@ type DeleteGroupRequest struct { type DeleteResponse struct { } -// Delete a service principal +// Delete a service principal. type DeleteServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks workspace. Id string `json:"-" url:"-"` } -// Delete a user +// Delete a user. type DeleteUserRequest struct { // Unique ID for a user in the Databricks workspace. Id string `json:"-" url:"-"` @@ -179,19 +179,19 @@ type DeleteWorkspaceAssignmentRequest struct { type DeleteWorkspacePermissionAssignmentResponse struct { } -// Get group details +// Get group details. type GetAccountGroupRequest struct { // Unique ID for a group in the Databricks account. Id string `json:"-" url:"-"` } -// Get service principal details +// Get service principal details. type GetAccountServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks account. Id string `json:"-" url:"-"` } -// Get user details +// Get user details. 
type GetAccountUserRequest struct { // Comma-separated list of attributes to return in response. Attributes string `json:"-" url:"attributes,omitempty"` @@ -238,7 +238,7 @@ type GetAssignableRolesForResourceResponse struct { Roles []Role `json:"roles,omitempty"` } -// Get group details +// Get group details. type GetGroupRequest struct { // Unique ID for a group in the Databricks workspace. Id string `json:"-" url:"-"` @@ -289,7 +289,7 @@ type GetRuleSetRequest struct { Name string `json:"-" url:"name"` } -// Get service principal details +// Get service principal details. type GetServicePrincipalRequest struct { // Unique ID for a service principal in the Databricks workspace. Id string `json:"-" url:"-"` @@ -322,7 +322,7 @@ func (f *GetSortOrder) Type() string { return "GetSortOrder" } -// Get user details +// Get user details. type GetUserRequest struct { // Comma-separated list of attributes to return in response. Attributes string `json:"-" url:"attributes,omitempty"` @@ -431,7 +431,7 @@ func (f *GroupSchema) Type() string { return "GroupSchema" } -// List group details +// List group details. type ListAccountGroupsRequest struct { // Comma-separated list of attributes to return in response. Attributes string `json:"-" url:"attributes,omitempty"` @@ -465,7 +465,7 @@ func (s ListAccountGroupsRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// List service principals +// List service principals. type ListAccountServicePrincipalsRequest struct { // Comma-separated list of attributes to return in response. Attributes string `json:"-" url:"attributes,omitempty"` @@ -499,7 +499,7 @@ func (s ListAccountServicePrincipalsRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// List users +// List users. type ListAccountUsersRequest struct { // Comma-separated list of attributes to return in response. 
Attributes string `json:"-" url:"attributes,omitempty"` @@ -534,7 +534,7 @@ func (s ListAccountUsersRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// List group details +// List group details. type ListGroupsRequest struct { // Comma-separated list of attributes to return in response. Attributes string `json:"-" url:"attributes,omitempty"` @@ -641,7 +641,7 @@ func (s ListServicePrincipalResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// List service principals +// List service principals. type ListServicePrincipalsRequest struct { // Comma-separated list of attributes to return in response. Attributes string `json:"-" url:"attributes,omitempty"` @@ -702,7 +702,7 @@ func (f *ListSortOrder) Type() string { return "ListSortOrder" } -// List users +// List users. type ListUsersRequest struct { // Comma-separated list of attributes to return in response. Attributes string `json:"-" url:"attributes,omitempty"` @@ -840,7 +840,7 @@ func (s ObjectPermissions) MarshalJSON() ([]byte, error) { } type PartialUpdate struct { - // Unique ID for a user in the Databricks workspace. + // Unique ID in the Databricks workspace. Id string `json:"-" url:"-"` Operations []Patch `json:"Operations,omitempty"` @@ -1428,8 +1428,7 @@ type User struct { ExternalId string `json:"externalId,omitempty"` Groups []ComplexValue `json:"groups,omitempty"` - // Databricks user ID. This is automatically set by Databricks. Any value - // provided by the client will be ignored. + // Databricks user ID. Id string `json:"id,omitempty" url:"-"` Name *Name `json:"name,omitempty"` diff --git a/service/jobs/model.go b/service/jobs/model.go index 2d9df43db..4272ce10f 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -2253,6 +2253,8 @@ func (f *PauseStatus) Type() string { // Cluster Manager (see cluster-common PerformanceTarget). 
type PerformanceTarget string +const PerformanceTargetBalanced PerformanceTarget = `BALANCED` + const PerformanceTargetCostOptimized PerformanceTarget = `COST_OPTIMIZED` const PerformanceTargetPerformanceOptimized PerformanceTarget = `PERFORMANCE_OPTIMIZED` @@ -2265,11 +2267,11 @@ func (f *PerformanceTarget) String() string { // Set raw string value and validate it against allowed values func (f *PerformanceTarget) Set(v string) error { switch v { - case `COST_OPTIMIZED`, `PERFORMANCE_OPTIMIZED`: + case `BALANCED`, `COST_OPTIMIZED`, `PERFORMANCE_OPTIMIZED`: *f = PerformanceTarget(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "COST_OPTIMIZED", "PERFORMANCE_OPTIMIZED"`, v) + return fmt.Errorf(`value "%s" is not one of "BALANCED", "COST_OPTIMIZED", "PERFORMANCE_OPTIMIZED"`, v) } } diff --git a/service/marketplace/model.go b/service/marketplace/model.go index e4669a62d..62364f223 100755 --- a/service/marketplace/model.go +++ b/service/marketplace/model.go @@ -658,6 +658,8 @@ type FileParentType string const FileParentTypeListing FileParentType = `LISTING` +const FileParentTypeListingResource FileParentType = `LISTING_RESOURCE` + const FileParentTypeProvider FileParentType = `PROVIDER` // String representation for [fmt.Print] @@ -668,11 +670,11 @@ func (f *FileParentType) String() string { // Set raw string value and validate it against allowed values func (f *FileParentType) Set(v string) error { switch v { - case `LISTING`, `PROVIDER`: + case `LISTING`, `LISTING_RESOURCE`, `PROVIDER`: *f = FileParentType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "LISTING", "PROVIDER"`, v) + return fmt.Errorf(`value "%s" is not one of "LISTING", "LISTING_RESOURCE", "PROVIDER"`, v) } } @@ -1351,7 +1353,7 @@ type Listing struct { Detail *ListingDetail `json:"detail,omitempty"` Id string `json:"id,omitempty"` - // Next Number: 26 + Summary ListingSummary `json:"summary"` ForceSendFields []string `json:"-" url:"-"` @@ -1501,7 +1503,6 @@ 
func (f *ListingStatus) Type() string { return "ListingStatus" } -// Next Number: 26 type ListingSummary struct { Categories []Category `json:"categories,omitempty"` @@ -1617,6 +1618,8 @@ func (f *ListingType) Type() string { type MarketplaceFileType string +const MarketplaceFileTypeApp MarketplaceFileType = `APP` + const MarketplaceFileTypeEmbeddedNotebook MarketplaceFileType = `EMBEDDED_NOTEBOOK` const MarketplaceFileTypeProviderIcon MarketplaceFileType = `PROVIDER_ICON` @@ -1629,11 +1632,11 @@ func (f *MarketplaceFileType) String() string { // Set raw string value and validate it against allowed values func (f *MarketplaceFileType) Set(v string) error { switch v { - case `EMBEDDED_NOTEBOOK`, `PROVIDER_ICON`: + case `APP`, `EMBEDDED_NOTEBOOK`, `PROVIDER_ICON`: *f = MarketplaceFileType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "EMBEDDED_NOTEBOOK", "PROVIDER_ICON"`, v) + return fmt.Errorf(`value "%s" is not one of "APP", "EMBEDDED_NOTEBOOK", "PROVIDER_ICON"`, v) } } diff --git a/service/ml/model.go b/service/ml/model.go index 338134ae3..5573c0279 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -323,27 +323,17 @@ type CreateForecastingExperimentRequest struct { // Name of the column in the input training table used to customize the // weight for each time series to calculate weighted metrics. CustomWeightsColumn string `json:"custom_weights_column,omitempty"` - // The quantity of the input data granularity. Together with - // data_granularity_unit field, this defines the time interval between - // consecutive rows in the time series data. For now, only 1 second, - // 1/5/10/15/30 minutes, 1 hour, 1 day, 1 week, 1 month, 1 quarter, 1 year - // are supported. - DataGranularityQuantity int64 `json:"data_granularity_quantity,omitempty"` - // The time unit of the input data granularity. Together with - // data_granularity_quantity field, this defines the time interval between - // consecutive rows in the time series data. 
Possible values: * 'W' (weeks) - // * 'D' / 'days' / 'day' * 'hours' / 'hour' / 'hr' / 'h' * 'm' / 'minute' / - // 'min' / 'minutes' / 'T' * 'S' / 'seconds' / 'sec' / 'second' * 'M' / - // 'month' / 'months' * 'Q' / 'quarter' / 'quarters' * 'Y' / 'year' / - // 'years' - DataGranularityUnit string `json:"data_granularity_unit"` // The path to the created experiment. This is the path where the experiment // will be stored in the workspace. ExperimentPath string `json:"experiment_path,omitempty"` + // The granularity of the forecast. This defines the time interval between + // consecutive rows in the time series data. Possible values: '1 second', '1 + // minute', '5 minutes', '10 minutes', '15 minutes', '30 minutes', 'Hourly', + // 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly'. + ForecastGranularity string `json:"forecast_granularity"` // The number of time steps into the future for which predictions should be - // made. This value represents a multiple of data_granularity_unit and - // data_granularity_quantity determining how far ahead the model will - // forecast. + // made. This value represents a multiple of forecast_granularity + // determining how far ahead the model will forecast. ForecastHorizon int64 `json:"forecast_horizon"` // Region code(s) to consider when automatically adding holiday features. // When empty, no holiday features are added. Only supports 1 holiday region diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 7b65cd092..3d0fb78de 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -556,6 +556,13 @@ type OidcFederationPolicy struct { // endpoint. Databricks strongly recommends relying on your issuer’s well // known endpoint for discovering public keys. JwksJson string `json:"jwks_json,omitempty"` + // URL of the public keys used to validate the signature of federated + // tokens, in JWKS format. Most use cases should not need to specify this + // field. 
If jwks_uri and jwks_json are both unspecified (recommended), + // Databricks automatically fetches the public keys from your issuer’s + // well known endpoint. Databricks strongly recommends relying on your + // issuer’s well known endpoint for discovering public keys. + JwksUri string `json:"jwks_uri,omitempty"` // The required token subject, as specified in the subject claim of // federated tokens. Must be specified for service principal federation // policies. Must not be specified for account federation policies. diff --git a/service/pipelines/model.go b/service/pipelines/model.go index 3cbab434b..b1c2795da 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -37,6 +37,8 @@ type CreatePipeline struct { DryRun bool `json:"dry_run,omitempty"` // Pipeline product edition. Edition string `json:"edition,omitempty"` + // Event log configuration for this pipeline + EventLog *EventLogSpec `json:"event_log,omitempty"` // Filters on which Pipeline packages to include in the deployed graph. Filters *Filters `json:"filters,omitempty"` // The definition of a gateway pipeline to support change data capture. @@ -44,7 +46,7 @@ type CreatePipeline struct { // Unique identifier for this pipeline. Id string `json:"id,omitempty"` // The configuration for a managed ingestion pipeline. These settings cannot - // be used with the 'libraries', 'target' or 'catalog' settings. + // be used with the 'libraries', 'schema', 'target', or 'catalog' settings. IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `json:"libraries,omitempty"` @@ -64,16 +66,15 @@ type CreatePipeline struct { // are specified, an error is thrown. RunAs *RunAs `json:"run_as,omitempty"` // The default schema (database) where tables are read from or published to. - // The presence of this field implies that the pipeline is in direct - // publishing mode. 
Schema string `json:"schema,omitempty"` // Whether serverless compute is enabled for this pipeline. Serverless bool `json:"serverless,omitempty"` // DBFS root directory for storing checkpoints and tables. Storage string `json:"storage,omitempty"` - // Target schema (database) to add tables in this pipeline to. If not - // specified, no data is published to the Hive metastore or Unity Catalog. - // To publish to Unity Catalog, also specify `catalog`. + // Target schema (database) to add tables in this pipeline to. Exactly one + // of `schema` or `target` must be specified. To publish to Unity Catalog, + // also specify `catalog`. This legacy field is deprecated for pipeline + // creation in favor of the `schema` field. Target string `json:"target,omitempty"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. Trigger *PipelineTrigger `json:"trigger,omitempty"` @@ -241,6 +242,8 @@ type EditPipeline struct { Development bool `json:"development,omitempty"` // Pipeline product edition. Edition string `json:"edition,omitempty"` + // Event log configuration for this pipeline + EventLog *EventLogSpec `json:"event_log,omitempty"` // If present, the last-modified time of the pipeline settings before the // edit. If the settings were modified after that time, then the request // will fail with a conflict. @@ -252,7 +255,7 @@ type EditPipeline struct { // Unique identifier for this pipeline. Id string `json:"id,omitempty"` // The configuration for a managed ingestion pipeline. These settings cannot - // be used with the 'libraries', 'target' or 'catalog' settings. + // be used with the 'libraries', 'schema', 'target', or 'catalog' settings. IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `json:"libraries,omitempty"` @@ -274,16 +277,15 @@ type EditPipeline struct { // are specified, an error is thrown. 
RunAs *RunAs `json:"run_as,omitempty"` // The default schema (database) where tables are read from or published to. - // The presence of this field implies that the pipeline is in direct - // publishing mode. Schema string `json:"schema,omitempty"` // Whether serverless compute is enabled for this pipeline. Serverless bool `json:"serverless,omitempty"` // DBFS root directory for storing checkpoints and tables. Storage string `json:"storage,omitempty"` - // Target schema (database) to add tables in this pipeline to. If not - // specified, no data is published to the Hive metastore or Unity Catalog. - // To publish to Unity Catalog, also specify `catalog`. + // Target schema (database) to add tables in this pipeline to. Exactly one + // of `schema` or `target` must be specified. To publish to Unity Catalog, + // also specify `catalog`. This legacy field is deprecated for pipeline + // creation in favor of the `schema` field. Target string `json:"target,omitempty"` // Which pipeline trigger to use. Deprecated: Use `continuous` instead. Trigger *PipelineTrigger `json:"trigger,omitempty"` @@ -351,6 +353,26 @@ func (f *EventLevel) Type() string { return "EventLevel" } +// Configurable event log parameters. +type EventLogSpec struct { + // The UC catalog the event log is published under. + Catalog string `json:"catalog,omitempty"` + // The name the event log is published to in UC. + Name string `json:"name,omitempty"` + // The UC schema the event log is published under. + Schema string `json:"schema,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *EventLogSpec) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EventLogSpec) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type FileLibrary struct { // The absolute path of the file. Path string `json:"path,omitempty"` @@ -1187,6 +1209,8 @@ type PipelineSpec struct { Development bool `json:"development,omitempty"` // Pipeline product edition. 
Edition string `json:"edition,omitempty"` + // Event log configuration for this pipeline + EventLog *EventLogSpec `json:"event_log,omitempty"` // Filters on which Pipeline packages to include in the deployed graph. Filters *Filters `json:"filters,omitempty"` // The definition of a gateway pipeline to support change data capture. @@ -1194,7 +1218,7 @@ type PipelineSpec struct { // Unique identifier for this pipeline. Id string `json:"id,omitempty"` // The configuration for a managed ingestion pipeline. These settings cannot - // be used with the 'libraries', 'target' or 'catalog' settings. + // be used with the 'libraries', 'schema', 'target', or 'catalog' settings. IngestionDefinition *IngestionPipelineDefinition `json:"ingestion_definition,omitempty"` // Libraries or code needed by this deployment. Libraries []PipelineLibrary `json:"libraries,omitempty"` @@ -1207,16 +1231,15 @@ type PipelineSpec struct { // Restart window of this pipeline. RestartWindow *RestartWindow `json:"restart_window,omitempty"` // The default schema (database) where tables are read from or published to. - // The presence of this field implies that the pipeline is in direct - // publishing mode. Schema string `json:"schema,omitempty"` // Whether serverless compute is enabled for this pipeline. Serverless bool `json:"serverless,omitempty"` // DBFS root directory for storing checkpoints and tables. Storage string `json:"storage,omitempty"` - // Target schema (database) to add tables in this pipeline to. If not - // specified, no data is published to the Hive metastore or Unity Catalog. - // To publish to Unity Catalog, also specify `catalog`. + // Target schema (database) to add tables in this pipeline to. Exactly one + // of `schema` or `target` must be specified. To publish to Unity Catalog, + // also specify `catalog`. This legacy field is deprecated for pipeline + // creation in favor of the `schema` field. Target string `json:"target,omitempty"` // Which pipeline trigger to use. 
Deprecated: Use `continuous` instead. Trigger *PipelineTrigger `json:"trigger,omitempty"` diff --git a/service/pkg.go b/service/pkg.go index 3d35d9288..052bf33ae 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -339,8 +339,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *provisioning.CredentialsAPI = nil _ *catalog.CredentialsAPI = nil + _ *provisioning.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil diff --git a/service/serving/model.go b/service/serving/model.go index 8a0bb6b63..a52550167 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -33,6 +33,10 @@ func (s Ai21LabsConfig) MarshalJSON() ([]byte, error) { } type AiGatewayConfig struct { + // Configuration for traffic fallback which auto fallbacks to other served + // entities if the request to a served entity fails with certain error + // codes, to increase availability. + FallbackConfig *FallbackConfig `json:"fallback_config,omitempty"` // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"` @@ -319,6 +323,27 @@ func (s AnthropicConfig) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type ApiKeyAuth struct { + // The name of the API key parameter used for authentication. + Key string `json:"key"` + // The Databricks secret key reference for an API Key. If you prefer to + // paste your token directly, see `value_plaintext`. + Value string `json:"value,omitempty"` + // The API Key provided as a plaintext string. If you prefer to reference + // your token using Databricks Secrets, see `value`. 
+ ValuePlaintext string `json:"value_plaintext,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ApiKeyAuth) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ApiKeyAuth) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type AutoCaptureConfigInput struct { // The name of the catalog in Unity Catalog. NOTE: On update, you cannot // change the catalog name if the inference table is already enabled. @@ -373,6 +398,25 @@ type AutoCaptureState struct { PayloadTable *PayloadTable `json:"payload_table,omitempty"` } +type BearerTokenAuth struct { + // The Databricks secret key reference for a token. If you prefer to paste + // your token directly, see `token_plaintext`. + Token string `json:"token,omitempty"` + // The token provided as a plaintext string. If you prefer to reference your + // token using Databricks Secrets, see `token`. + TokenPlaintext string `json:"token_plaintext,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *BearerTokenAuth) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s BearerTokenAuth) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get build logs for a served model type BuildLogsRequest struct { // The name of the serving endpoint that the served model belongs to. This @@ -494,6 +538,18 @@ func (s CreateServingEndpoint) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Configs needed to create a custom provider model route. +type CustomProviderConfig struct { + // This is a field to provide API key authentication for the custom provider + // API. You can only specify one authentication method. + ApiKeyAuth *ApiKeyAuth `json:"api_key_auth,omitempty"` + // This is a field to provide bearer token authentication for the custom + // provider API. You can only specify one authentication method. 
+ BearerTokenAuth *BearerTokenAuth `json:"bearer_token_auth,omitempty"` + // This is a field to provide the URL of the custom provider API. + CustomProviderUrl string `json:"custom_provider_url"` +} + // Details necessary to query this object's API through the DataPlane APIs. type DataPlaneInfo struct { // Authorization details as a string. @@ -862,6 +918,8 @@ type ExternalModel struct { AnthropicConfig *AnthropicConfig `json:"anthropic_config,omitempty"` // Cohere Config. Only required if the provider is 'cohere'. CohereConfig *CohereConfig `json:"cohere_config,omitempty"` + // Custom Provider Config. Only required if the provider is 'custom'. + CustomProviderConfig *CustomProviderConfig `json:"custom_provider_config,omitempty"` // Databricks Model Serving Config. Only required if the provider is // 'databricks-model-serving'. DatabricksModelServingConfig *DatabricksModelServingConfig `json:"databricks_model_serving_config,omitempty"` @@ -893,6 +951,8 @@ const ExternalModelProviderAnthropic ExternalModelProvider = `anthropic` const ExternalModelProviderCohere ExternalModelProvider = `cohere` +const ExternalModelProviderCustom ExternalModelProvider = `custom` + const ExternalModelProviderDatabricksModelServing ExternalModelProvider = `databricks-model-serving` const ExternalModelProviderGoogleCloudVertexAi ExternalModelProvider = `google-cloud-vertex-ai` @@ -909,11 +969,11 @@ func (f *ExternalModelProvider) String() string { // Set raw string value and validate it against allowed values func (f *ExternalModelProvider) Set(v string) error { switch v { - case `ai21labs`, `amazon-bedrock`, `anthropic`, `cohere`, `databricks-model-serving`, `google-cloud-vertex-ai`, `openai`, `palm`: + case `ai21labs`, `amazon-bedrock`, `anthropic`, `cohere`, `custom`, `databricks-model-serving`, `google-cloud-vertex-ai`, `openai`, `palm`: *f = ExternalModelProvider(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ai21labs", "amazon-bedrock", "anthropic", "cohere", 
"databricks-model-serving", "google-cloud-vertex-ai", "openai", "palm"`, v) + return fmt.Errorf(`value "%s" is not one of "ai21labs", "amazon-bedrock", "anthropic", "cohere", "custom", "databricks-model-serving", "google-cloud-vertex-ai", "openai", "palm"`, v) } } @@ -941,6 +1001,16 @@ func (s ExternalModelUsageElement) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type FallbackConfig struct { + // Whether to enable traffic fallback. When a served entity in the serving + // endpoint returns specific error codes (e.g. 500), the request will + // automatically be round-robin attempted with other served entities in the + // same endpoint, following the order of served entity list, until a + // successful response is returned. If all attempts fail, return the last + // response with the error code. + Enabled bool `json:"enabled"` +} + // All fields are not sensitive as they are hard-coded in the system and made // available to customers. type FoundationModel struct { @@ -1183,6 +1253,10 @@ func (s PayloadTable) MarshalJSON() ([]byte, error) { } type PutAiGatewayRequest struct { + // Configuration for traffic fallback which auto fallbacks to other served + // entities if the request to a served entity fails with certain error + // codes, to increase availability. + FallbackConfig *FallbackConfig `json:"fallback_config,omitempty"` // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"` @@ -1202,6 +1276,10 @@ type PutAiGatewayRequest struct { } type PutAiGatewayResponse struct { + // Configuration for traffic fallback which auto fallbacks to other served + // entities if the request to a served entity fails with certain error + // codes, to increase availability. + FallbackConfig *FallbackConfig `json:"fallback_config,omitempty"` // Configuration for AI Guardrails to prevent unwanted data and unsafe data // in requests and responses. 
Guardrails *AiGatewayGuardrails `json:"guardrails,omitempty"` @@ -1691,6 +1769,8 @@ func (f *ServedModelInputWorkloadSize) Type() string { return "ServedModelInputWorkloadSize" } +// Please keep this in sync with workload types in +// InferenceEndpointEntities.scala type ServedModelInputWorkloadType string const ServedModelInputWorkloadTypeCpu ServedModelInputWorkloadType = `CPU` @@ -2103,6 +2183,8 @@ type ServingEndpointPermissionsRequest struct { ServingEndpointId string `json:"-" url:"-"` } +// Please keep this in sync with workload types in +// InferenceEndpointEntities.scala type ServingModelWorkloadType string const ServingModelWorkloadTypeCpu ServingModelWorkloadType = `CPU` diff --git a/service/settings/model.go b/service/settings/model.go index b4ec75db2..f05607616 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -2865,6 +2865,10 @@ const TokenTypeArclightAzureExchangeToken TokenType = `ARCLIGHT_AZURE_EXCHANGE_T const TokenTypeArclightAzureExchangeTokenWithUserDelegationKey TokenType = `ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY` +const TokenTypeArclightMultiTenantAzureExchangeToken TokenType = `ARCLIGHT_MULTI_TENANT_AZURE_EXCHANGE_TOKEN` + +const TokenTypeArclightMultiTenantAzureExchangeTokenWithUserDelegationKey TokenType = `ARCLIGHT_MULTI_TENANT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY` + const TokenTypeAzureActiveDirectoryToken TokenType = `AZURE_ACTIVE_DIRECTORY_TOKEN` // String representation for [fmt.Print] @@ -2875,11 +2879,11 @@ func (f *TokenType) String() string { // Set raw string value and validate it against allowed values func (f *TokenType) Set(v string) error { switch v { - case `ARCLIGHT_AZURE_EXCHANGE_TOKEN`, `ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY`, `AZURE_ACTIVE_DIRECTORY_TOKEN`: + case `ARCLIGHT_AZURE_EXCHANGE_TOKEN`, `ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY`, `ARCLIGHT_MULTI_TENANT_AZURE_EXCHANGE_TOKEN`,
`ARCLIGHT_MULTI_TENANT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY`, `AZURE_ACTIVE_DIRECTORY_TOKEN`: *f = TokenType(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ARCLIGHT_AZURE_EXCHANGE_TOKEN", "ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY", "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) + return fmt.Errorf(`value "%s" is not one of "ARCLIGHT_AZURE_EXCHANGE_TOKEN", "ARCLIGHT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY", "ARCLIGHT_MULTI_TENANT_AZURE_EXCHANGE_TOKEN", "ARCLIGHT_MULTI_TENANT_AZURE_EXCHANGE_TOKEN_WITH_USER_DELEGATION_KEY", "AZURE_ACTIVE_DIRECTORY_TOKEN"`, v) } } diff --git a/service/sharing/model.go b/service/sharing/model.go index 0784530a4..887ee4a24 100755 --- a/service/sharing/model.go +++ b/service/sharing/model.go @@ -222,41 +222,7 @@ type DeltaSharingDependencyList struct { Dependencies []DeltaSharingDependency `json:"dependencies,omitempty"` } -// A Function in UC as a dependency. -type DeltaSharingFunctionDependency struct { - FunctionName string `json:"function_name,omitempty"` - - SchemaName string `json:"schema_name,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *DeltaSharingFunctionDependency) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s DeltaSharingFunctionDependency) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -// A Table in UC as a dependency. -type DeltaSharingTableDependency struct { - SchemaName string `json:"schema_name,omitempty"` - - TableName string `json:"table_name,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *DeltaSharingTableDependency) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s DeltaSharingTableDependency) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - -type Function struct { +type DeltaSharingFunction struct { // The aliass of registered model. Aliases []RegisteredModelAlias `json:"aliases,omitempty"` // The comment of the function. 
@@ -293,11 +259,45 @@ type Function struct { ForceSendFields []string `json:"-" url:"-"` } -func (s *Function) UnmarshalJSON(b []byte) error { +func (s *DeltaSharingFunction) UnmarshalJSON(b []byte) error { return marshal.Unmarshal(b, s) } -func (s Function) MarshalJSON() ([]byte, error) { +func (s DeltaSharingFunction) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A Function in UC as a dependency. +type DeltaSharingFunctionDependency struct { + FunctionName string `json:"function_name,omitempty"` + + SchemaName string `json:"schema_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DeltaSharingFunctionDependency) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeltaSharingFunctionDependency) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// A Table in UC as a dependency. +type DeltaSharingTableDependency struct { + SchemaName string `json:"schema_name,omitempty"` + + TableName string `json:"table_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DeltaSharingTableDependency) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DeltaSharingTableDependency) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } @@ -513,7 +513,7 @@ func (s ListProviderShareAssetsRequest) MarshalJSON() ([]byte, error) { // share. type ListProviderShareAssetsResponse struct { // The list of functions in the share. - Functions []Function `json:"functions,omitempty"` + Functions []DeltaSharingFunction `json:"functions,omitempty"` // The list of notebooks in the share. Notebooks []NotebookFile `json:"notebooks,omitempty"` // The list of tables in the share. 
diff --git a/service/sql/model.go b/service/sql/model.go index 56b0e2bfa..3d6336bf0 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -1811,6 +1811,53 @@ func (s ExternalLink) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type ExternalQuerySource struct { + // The canonical identifier for this SQL alert + AlertId string `json:"alert_id,omitempty"` + // The canonical identifier for this Lakeview dashboard + DashboardId string `json:"dashboard_id,omitempty"` + // The canonical identifier for this Genie space + GenieSpaceId string `json:"genie_space_id,omitempty"` + + JobInfo *ExternalQuerySourceJobInfo `json:"job_info,omitempty"` + // The canonical identifier for this legacy dashboard + LegacyDashboardId string `json:"legacy_dashboard_id,omitempty"` + // The canonical identifier for this notebook + NotebookId string `json:"notebook_id,omitempty"` + // The canonical identifier for this SQL query + SqlQueryId string `json:"sql_query_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ExternalQuerySource) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalQuerySource) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ExternalQuerySourceJobInfo struct { + // The canonical identifier for this job. + JobId string `json:"job_id,omitempty"` + // The canonical identifier of the run. This ID is unique across all runs of + // all jobs. + JobRunId string `json:"job_run_id,omitempty"` + // The canonical identifier of the task run. 
+ JobTaskRunId string `json:"job_task_run_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ExternalQuerySourceJobInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ExternalQuerySourceJobInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type Format string const FormatArrowStream Format = `ARROW_STREAM` @@ -3141,6 +3188,10 @@ type QueryInfo struct { QueryEndTimeMs int64 `json:"query_end_time_ms,omitempty"` // The query ID. QueryId string `json:"query_id,omitempty"` + // A struct that contains key-value pairs representing Databricks entities + // that were involved in the execution of this statement, such as jobs, + // notebooks, or dashboards. This field only records Databricks entities. + QuerySource *ExternalQuerySource `json:"query_source,omitempty"` // The time the query started. QueryStartTimeMs int64 `json:"query_start_time_ms,omitempty"` // The text of the query. From 3aaa4c0ba2c4068d09018169237cd6a11977cfcd Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Wed, 26 Mar 2025 11:59:13 +0000 Subject: [PATCH 35/54] [Release] Release v0.61.0 ## Release v0.61.0 ### New Features and Improvements * Support user-to-machine authentication in the SDK ([#1108](https://github.com/databricks/databricks-sdk-go/pull/1108)). - Instances of `ApiClient` now share the same connection pool by default ([PR #1190](https://github.com/databricks/databricks-sdk-go/pull/1190)). ### Internal Changes - Stop recommending users to report an issue when the SDK encounters an unknown error ([PR #1189](https://github.com/databricks/databricks-sdk-go/pull/1189)). ### API Changes * Added `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. 
* Added `EffectiveUserApiScopes`, `Oauth2AppClientId`, `Oauth2AppIntegrationId` and `UserApiScopes` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). * Added `Abfss`, `Dbfs`, `ErrorMessage`, `ExecutionDurationSeconds`, `File`, `Gcs`, `S3`, `Status`, `Volumes` and `Workspace` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). * [Breaking] Added `ForecastGranularity` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). * Added `JwksUri` field for [oauth2.OidcFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#OidcFederationPolicy). * Added `EventLog` field for [pipelines.CreatePipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#CreatePipeline). * Added `EventLog` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline). * Added `EventLog` field for [pipelines.PipelineSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineSpec). * Added `FallbackConfig` field for [serving.AiGatewayConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayConfig). * Added `CustomProviderConfig` field for [serving.ExternalModel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModel). * Added `FallbackConfig` field for [serving.PutAiGatewayRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayRequest). * Added `FallbackConfig` field for [serving.PutAiGatewayResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayResponse). 
* Added `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.DeltaSharingFunction](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunction). * Added `QuerySource` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). * Added `ForeignCatalog` enum value for [catalog.CatalogType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogType). * Added `Browse` enum value for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). * Added `AccessTokenFailure`, `AllocationTimeout`, `AllocationTimeoutNodeDaemonNotReady`, `AllocationTimeoutNoHealthyClusters`, `AllocationTimeoutNoMatchedClusters`, `AllocationTimeoutNoReadyClusters`, `AllocationTimeoutNoUnallocatedClusters`, `AllocationTimeoutNoWarmedUpClusters`, `AwsInaccessibleKmsKeyFailure`, `AwsInstanceProfileUpdateFailure`, `AwsInvalidKeyPair`, `AwsInvalidKmsKeyState`, `AwsResourceQuotaExceeded`, `AzurePackedDeploymentPartialFailure`, `BootstrapTimeoutDueToMisconfig`, `BudgetPolicyLimitEnforcementActivated`, `BudgetPolicyResolutionFailure`, `CloudAccountSetupFailure`, `CloudOperationCancelled`, `CloudProviderInstanceNotLaunched`, `CloudProviderLaunchFailureDueToMisconfig`, `CloudProviderResourceStockoutDueToMisconfig`, `ClusterOperationThrottled`, `ClusterOperationTimeout`, `ControlPlaneRequestFailureDueToMisconfig`, `DataAccessConfigChanged`, `DisasterRecoveryReplication`, `DriverEviction`, `DriverLaunchTimeout`, `DriverNodeUnreachable`, `DriverOutOfDisk`, `DriverOutOfMemory`, `DriverPodCreationFailure`, `DriverUnexpectedFailure`, `DynamicSparkConfSizeExceeded`, `EosSparkImage`, `ExecutorPodUnscheduled`, `GcpApiRateQuotaExceeded`, `GcpForbidden`, `GcpIamTimeout`, 
`GcpInaccessibleKmsKeyFailure`, `GcpInsufficientCapacity`, `GcpIpSpaceExhausted`, `GcpKmsKeyPermissionDenied`, `GcpNotFound`, `GcpResourceQuotaExceeded`, `GcpServiceAccountAccessDenied`, `GcpServiceAccountNotFound`, `GcpSubnetNotReady`, `GcpTrustedImageProjectsViolated`, `GkeBasedClusterTermination`, `InitContainerNotFinished`, `InstancePoolMaxCapacityReached`, `InstancePoolNotFound`, `InstanceUnreachableDueToMisconfig`, `InternalCapacityFailure`, `InvalidAwsParameter`, `InvalidInstancePlacementProtocol`, `InvalidWorkerImageFailure`, `InPenaltyBox`, `LazyAllocationTimeout`, `MaintenanceMode`, `NetvisorSetupTimeout`, `NoMatchedK8s`, `NoMatchedK8sTestingTag`, `PodAssignmentFailure`, `PodSchedulingFailure`, `ResourceUsageBlocked`, `SecretCreationFailure`, `ServerlessLongRunningTerminated`, `SparkImageDownloadThrottled`, `SparkImageNotFound`, `SshBootstrapFailure`, `StorageDownloadFailureDueToMisconfig`, `StorageDownloadFailureSlow`, `StorageDownloadFailureThrottled`, `UnexpectedPodRecreation`, `UserInitiatedVmTermination` and `WorkspaceUpdate` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). * Added `GeneratedSqlQueryTooLongException` and `MissingSqlQueryException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). * Added `Balanced` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). * Added `ListingResource` enum value for [marketplace.FileParentType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#FileParentType). * Added `App` enum value for [marketplace.MarketplaceFileType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#MarketplaceFileType). 
* Added `Custom` enum value for [serving.ExternalModelProvider](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModelProvider). * Added `ArclightMultiTenantAzureExchangeToken` and `ArclightMultiTenantAzureExchangeTokenWithUserDelegationKey` enum values for [settings.TokenType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#TokenType). * [Breaking] Changed `CreateExperiment` method for [w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service with new required argument order. * Changed `InstanceTypeId` field for [compute.NodeInstanceType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeInstanceType) to be required. * Changed `Category` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType) to be required. * [Breaking] Changed `Functions` field for [sharing.ListProviderShareAssetsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ListProviderShareAssetsResponse) to type [sharing.DeltaSharingFunctionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunctionList). * [Breaking] Removed `ExecutionDetails` and `Script` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). * [Breaking] Removed `SupportsElasticDisk` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType). * [Breaking] Removed `DataGranularityQuantity` and `DataGranularityUnit` fields for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). 
* [Breaking] Removed `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.Function](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#Function). --- .release_metadata.json | 2 +- CHANGELOG.md | 46 ++++++++++++++++++++++++++++++++++++++++++ NEXT_CHANGELOG.md | 39 +---------------------------------- version/version.go | 2 +- 4 files changed, 49 insertions(+), 40 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 36450aea5..6c56a949d 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-03-11 14:39:51+0000" + "timestamp": "2025-03-26 11:59:09+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 2c8ac907c..360ea7fc4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,51 @@ # Version changelog +## Release v0.61.0 + +### New Features and Improvements + +* Support user-to-machine authentication in the SDK ([#1108](https://github.com/databricks/databricks-sdk-go/pull/1108)). +- Instances of `ApiClient` now share the same connection pool by default ([PR #1190](https://github.com/databricks/databricks-sdk-go/pull/1190)). + +### Internal Changes + +- Stop recommending users to report an issue when the SDK encounters an unknown + error ([PR #1189](https://github.com/databricks/databricks-sdk-go/pull/1189)). + +### API Changes +* Added `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Added `EffectiveUserApiScopes`, `Oauth2AppClientId`, `Oauth2AppIntegrationId` and `UserApiScopes` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). 
+* Added `Abfss`, `Dbfs`, `ErrorMessage`, `ExecutionDurationSeconds`, `File`, `Gcs`, `S3`, `Status`, `Volumes` and `Workspace` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). +* [Breaking] Added `ForecastGranularity` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). +* Added `JwksUri` field for [oauth2.OidcFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#OidcFederationPolicy). +* Added `EventLog` field for [pipelines.CreatePipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#CreatePipeline). +* Added `EventLog` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline). +* Added `EventLog` field for [pipelines.PipelineSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineSpec). +* Added `FallbackConfig` field for [serving.AiGatewayConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayConfig). +* Added `CustomProviderConfig` field for [serving.ExternalModel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModel). +* Added `FallbackConfig` field for [serving.PutAiGatewayRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayRequest). +* Added `FallbackConfig` field for [serving.PutAiGatewayResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayResponse). 
+* Added `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.DeltaSharingFunction](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunction). +* Added `QuerySource` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). +* Added `ForeignCatalog` enum value for [catalog.CatalogType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogType). +* Added `Browse` enum value for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). +* Added `AccessTokenFailure`, `AllocationTimeout`, `AllocationTimeoutNodeDaemonNotReady`, `AllocationTimeoutNoHealthyClusters`, `AllocationTimeoutNoMatchedClusters`, `AllocationTimeoutNoReadyClusters`, `AllocationTimeoutNoUnallocatedClusters`, `AllocationTimeoutNoWarmedUpClusters`, `AwsInaccessibleKmsKeyFailure`, `AwsInstanceProfileUpdateFailure`, `AwsInvalidKeyPair`, `AwsInvalidKmsKeyState`, `AwsResourceQuotaExceeded`, `AzurePackedDeploymentPartialFailure`, `BootstrapTimeoutDueToMisconfig`, `BudgetPolicyLimitEnforcementActivated`, `BudgetPolicyResolutionFailure`, `CloudAccountSetupFailure`, `CloudOperationCancelled`, `CloudProviderInstanceNotLaunched`, `CloudProviderLaunchFailureDueToMisconfig`, `CloudProviderResourceStockoutDueToMisconfig`, `ClusterOperationThrottled`, `ClusterOperationTimeout`, `ControlPlaneRequestFailureDueToMisconfig`, `DataAccessConfigChanged`, `DisasterRecoveryReplication`, `DriverEviction`, `DriverLaunchTimeout`, `DriverNodeUnreachable`, `DriverOutOfDisk`, `DriverOutOfMemory`, `DriverPodCreationFailure`, `DriverUnexpectedFailure`, `DynamicSparkConfSizeExceeded`, `EosSparkImage`, `ExecutorPodUnscheduled`, `GcpApiRateQuotaExceeded`, `GcpForbidden`, `GcpIamTimeout`, 
`GcpInaccessibleKmsKeyFailure`, `GcpInsufficientCapacity`, `GcpIpSpaceExhausted`, `GcpKmsKeyPermissionDenied`, `GcpNotFound`, `GcpResourceQuotaExceeded`, `GcpServiceAccountAccessDenied`, `GcpServiceAccountNotFound`, `GcpSubnetNotReady`, `GcpTrustedImageProjectsViolated`, `GkeBasedClusterTermination`, `InitContainerNotFinished`, `InstancePoolMaxCapacityReached`, `InstancePoolNotFound`, `InstanceUnreachableDueToMisconfig`, `InternalCapacityFailure`, `InvalidAwsParameter`, `InvalidInstancePlacementProtocol`, `InvalidWorkerImageFailure`, `InPenaltyBox`, `LazyAllocationTimeout`, `MaintenanceMode`, `NetvisorSetupTimeout`, `NoMatchedK8s`, `NoMatchedK8sTestingTag`, `PodAssignmentFailure`, `PodSchedulingFailure`, `ResourceUsageBlocked`, `SecretCreationFailure`, `ServerlessLongRunningTerminated`, `SparkImageDownloadThrottled`, `SparkImageNotFound`, `SshBootstrapFailure`, `StorageDownloadFailureDueToMisconfig`, `StorageDownloadFailureSlow`, `StorageDownloadFailureThrottled`, `UnexpectedPodRecreation`, `UserInitiatedVmTermination` and `WorkspaceUpdate` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* Added `GeneratedSqlQueryTooLongException` and `MissingSqlQueryException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). +* Added `Balanced` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). +* Added `ListingResource` enum value for [marketplace.FileParentType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#FileParentType). +* Added `App` enum value for [marketplace.MarketplaceFileType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#MarketplaceFileType). 
+* Added `Custom` enum value for [serving.ExternalModelProvider](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModelProvider). +* Added `ArclightMultiTenantAzureExchangeToken` and `ArclightMultiTenantAzureExchangeTokenWithUserDelegationKey` enum values for [settings.TokenType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#TokenType). +* [Breaking] Changed `CreateExperiment` method for [w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service with new required argument order. +* Changed `InstanceTypeId` field for [compute.NodeInstanceType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeInstanceType) to be required. +* Changed `Category` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType) to be required. +* [Breaking] Changed `Functions` field for [sharing.ListProviderShareAssetsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ListProviderShareAssetsResponse) to type [sharing.DeltaSharingFunctionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunctionList). +* [Breaking] Removed `ExecutionDetails` and `Script` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). +* [Breaking] Removed `SupportsElasticDisk` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType). +* [Breaking] Removed `DataGranularityQuantity` and `DataGranularityUnit` fields for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). 
+* [Breaking] Removed `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.Function](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#Function). + + ## Release v0.60.0 ### API Changes diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index a5567234b..5fc234f9f 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,50 +1,13 @@ # NEXT CHANGELOG -## Release v0.61.0 +## Release v0.62.0 ### New Features and Improvements -* Support user-to-machine authentication in the SDK ([#1108](https://github.com/databricks/databricks-sdk-go/pull/1108)). -- Instances of `ApiClient` now share the same connection pool by default ([PR #1190](https://github.com/databricks/databricks-sdk-go/pull/1190)). - ### Bug Fixes ### Documentation ### Internal Changes -- Stop recommending users to report an issue when the SDK encounters an unknown - error ([PR #1189](https://github.com/databricks/databricks-sdk-go/pull/1189)). - ### API Changes -* Added `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. -* Added `EffectiveUserApiScopes`, `Oauth2AppClientId`, `Oauth2AppIntegrationId` and `UserApiScopes` fields for [apps.App](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/apps#App). -* Added `Abfss`, `Dbfs`, `ErrorMessage`, `ExecutionDurationSeconds`, `File`, `Gcs`, `S3`, `Status`, `Volumes` and `Workspace` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). 
-* [Breaking] Added `ForecastGranularity` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). -* Added `JwksUri` field for [oauth2.OidcFederationPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/oauth2#OidcFederationPolicy). -* Added `EventLog` field for [pipelines.CreatePipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#CreatePipeline). -* Added `EventLog` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline). -* Added `EventLog` field for [pipelines.PipelineSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineSpec). -* Added `FallbackConfig` field for [serving.AiGatewayConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#AiGatewayConfig). -* Added `CustomProviderConfig` field for [serving.ExternalModel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModel). -* Added `FallbackConfig` field for [serving.PutAiGatewayRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayRequest). -* Added `FallbackConfig` field for [serving.PutAiGatewayResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#PutAiGatewayResponse). -* Added `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.DeltaSharingFunction](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunction). -* Added `QuerySource` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). 
-* Added `ForeignCatalog` enum value for [catalog.CatalogType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#CatalogType). -* Added `Browse` enum value for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). -* Added `AccessTokenFailure`, `AllocationTimeout`, `AllocationTimeoutNodeDaemonNotReady`, `AllocationTimeoutNoHealthyClusters`, `AllocationTimeoutNoMatchedClusters`, `AllocationTimeoutNoReadyClusters`, `AllocationTimeoutNoUnallocatedClusters`, `AllocationTimeoutNoWarmedUpClusters`, `AwsInaccessibleKmsKeyFailure`, `AwsInstanceProfileUpdateFailure`, `AwsInvalidKeyPair`, `AwsInvalidKmsKeyState`, `AwsResourceQuotaExceeded`, `AzurePackedDeploymentPartialFailure`, `BootstrapTimeoutDueToMisconfig`, `BudgetPolicyLimitEnforcementActivated`, `BudgetPolicyResolutionFailure`, `CloudAccountSetupFailure`, `CloudOperationCancelled`, `CloudProviderInstanceNotLaunched`, `CloudProviderLaunchFailureDueToMisconfig`, `CloudProviderResourceStockoutDueToMisconfig`, `ClusterOperationThrottled`, `ClusterOperationTimeout`, `ControlPlaneRequestFailureDueToMisconfig`, `DataAccessConfigChanged`, `DisasterRecoveryReplication`, `DriverEviction`, `DriverLaunchTimeout`, `DriverNodeUnreachable`, `DriverOutOfDisk`, `DriverOutOfMemory`, `DriverPodCreationFailure`, `DriverUnexpectedFailure`, `DynamicSparkConfSizeExceeded`, `EosSparkImage`, `ExecutorPodUnscheduled`, `GcpApiRateQuotaExceeded`, `GcpForbidden`, `GcpIamTimeout`, `GcpInaccessibleKmsKeyFailure`, `GcpInsufficientCapacity`, `GcpIpSpaceExhausted`, `GcpKmsKeyPermissionDenied`, `GcpNotFound`, `GcpResourceQuotaExceeded`, `GcpServiceAccountAccessDenied`, `GcpServiceAccountNotFound`, `GcpSubnetNotReady`, `GcpTrustedImageProjectsViolated`, `GkeBasedClusterTermination`, `InitContainerNotFinished`, `InstancePoolMaxCapacityReached`, `InstancePoolNotFound`, `InstanceUnreachableDueToMisconfig`, `InternalCapacityFailure`, `InvalidAwsParameter`, 
`InvalidInstancePlacementProtocol`, `InvalidWorkerImageFailure`, `InPenaltyBox`, `LazyAllocationTimeout`, `MaintenanceMode`, `NetvisorSetupTimeout`, `NoMatchedK8s`, `NoMatchedK8sTestingTag`, `PodAssignmentFailure`, `PodSchedulingFailure`, `ResourceUsageBlocked`, `SecretCreationFailure`, `ServerlessLongRunningTerminated`, `SparkImageDownloadThrottled`, `SparkImageNotFound`, `SshBootstrapFailure`, `StorageDownloadFailureDueToMisconfig`, `StorageDownloadFailureSlow`, `StorageDownloadFailureThrottled`, `UnexpectedPodRecreation`, `UserInitiatedVmTermination` and `WorkspaceUpdate` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). -* Added `GeneratedSqlQueryTooLongException` and `MissingSqlQueryException` enum values for [dashboards.MessageErrorType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#MessageErrorType). -* Added `Balanced` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). -* Added `ListingResource` enum value for [marketplace.FileParentType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#FileParentType). -* Added `App` enum value for [marketplace.MarketplaceFileType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/marketplace#MarketplaceFileType). -* Added `Custom` enum value for [serving.ExternalModelProvider](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ExternalModelProvider). -* Added `ArclightMultiTenantAzureExchangeToken` and `ArclightMultiTenantAzureExchangeTokenWithUserDelegationKey` enum values for [settings.TokenType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#TokenType). 
-* [Breaking] Changed `CreateExperiment` method for [w.Forecasting](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ForecastingAPI) workspace-level service with new required argument order. -* Changed `InstanceTypeId` field for [compute.NodeInstanceType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeInstanceType) to be required. -* Changed `Category` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType) to be required. -* [Breaking] Changed `Functions` field for [sharing.ListProviderShareAssetsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#ListProviderShareAssetsResponse) to type [sharing.DeltaSharingFunctionList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#DeltaSharingFunctionList). -* [Breaking] Removed `ExecutionDetails` and `Script` fields for [compute.InitScriptInfoAndExecutionDetails](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InitScriptInfoAndExecutionDetails). -* [Breaking] Removed `SupportsElasticDisk` field for [compute.NodeType](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#NodeType). -* [Breaking] Removed `DataGranularityQuantity` and `DataGranularityUnit` fields for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). -* [Breaking] Removed `Aliases`, `Comment`, `DataType`, `DependencyList`, `FullDataType`, `Id`, `InputParams`, `Name`, `Properties`, `RoutineDefinition`, `Schema`, `SecurableKind`, `Share`, `ShareId`, `StorageLocation` and `Tags` fields for [sharing.Function](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sharing#Function). 
diff --git a/version/version.go b/version/version.go index 6479f4d94..47cad98d6 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.60.0" +const Version = "0.61.0" From 0022508e8574f02cf9f07e55ea3457f586d5a83c Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Tue, 8 Apr 2025 19:16:11 +0200 Subject: [PATCH 36/54] [Internal] Remove `TestAccDashboards` as legacy dashboard creation on has been discontinued (#1205) ## What changes are proposed in this pull request? As of April 7, 2025: Official support for the legacy version of dashboards has ended. https://docs.databricks.com/gcp/en/sql/user/dashboards. Removing the test and keeping it commented out for context as discussed. ## How is this tested? N/A --- NEXT_CHANGELOG.md | 1 + internal/dbsql_test.go | 63 ++++++++++++++++++++---------------------- 2 files changed, 31 insertions(+), 33 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 5fc234f9f..ce727a443 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -9,5 +9,6 @@ ### Documentation ### Internal Changes +* Remove `TestAccDashboards` as legacy dashboard creation on has been discontinued ([#1205](https://github.com/databricks/databricks-sdk-go/pull/1205)). 
### API Changes diff --git a/internal/dbsql_test.go b/internal/dbsql_test.go index 8478661fd..ed33c5373 100644 --- a/internal/dbsql_test.go +++ b/internal/dbsql_test.go @@ -119,36 +119,33 @@ func TestAccAlerts(t *testing.T) { assert.Equal(t, alert.Id, names[byId.DisplayName]) } -func TestAccDashboards(t *testing.T) { - ctx, w := workspaceTest(t) - - created, err := w.Dashboards.Create(ctx, sql.DashboardPostContent{ - Name: RandomName("go-sdk-"), - }) - require.NoError(t, err) - - defer w.Dashboards.DeleteByDashboardId(ctx, created.Id) - - byId, err := w.Dashboards.GetByDashboardId(ctx, created.Id) - require.NoError(t, err) - - byName, err := w.Dashboards.GetByName(ctx, byId.Name) - require.NoError(t, err) - assert.Equal(t, byId.Id, byName.Id) - - all, err := w.Dashboards.ListAll(ctx, sql.ListDashboardsRequest{}) - require.NoError(t, err) - - names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{}) - require.NoError(t, err) - assert.Equal(t, created.Id, names[byId.Name]) - assert.Equal(t, len(all), len(names)) - - err = w.Dashboards.DeleteByDashboardId(ctx, created.Id) - require.NoError(t, err) - - err = w.Dashboards.Restore(ctx, sql.RestoreDashboardRequest{ - DashboardId: created.Id, - }) - require.NoError(t, err) -} +// As of April 7, 2025: Official support for the legacy version of dashboards has ended. More context: https://docs.databricks.com/gcp/en/sql/user/dashboards +// func TestAccDashboards(t *testing.T) { +// ctx, w := workspaceTest(t) +// // As of April 7, 2025: Official support for the legacy version of dashboards has ended. 
+// if w.Config.IsGcp() { +// t.Skip("Legacy dashboard creation is not supported on GCP") +// } +// created, err := w.Dashboards.Create(ctx, sql.DashboardPostContent{ +// Name: RandomName("go-sdk-"), +// }) +// require.NoError(t, err) +// defer w.Dashboards.DeleteByDashboardId(ctx, created.Id) +// byId, err := w.Dashboards.GetByDashboardId(ctx, created.Id) +// require.NoError(t, err) +// byName, err := w.Dashboards.GetByName(ctx, byId.Name) +// require.NoError(t, err) +// assert.Equal(t, byId.Id, byName.Id) +// all, err := w.Dashboards.ListAll(ctx, sql.ListDashboardsRequest{}) +// require.NoError(t, err) +// names, err := w.Dashboards.DashboardNameToIdMap(ctx, sql.ListDashboardsRequest{}) +// require.NoError(t, err) +// assert.Equal(t, created.Id, names[byId.Name]) +// assert.Equal(t, len(all), len(names)) +// err = w.Dashboards.DeleteByDashboardId(ctx, created.Id) +// require.NoError(t, err) +// err = w.Dashboards.Restore(ctx, sql.RestoreDashboardRequest{ +// DashboardId: created.Id, +// }) +// require.NoError(t, err) +// } From 8bf673d50c0eec405cdcd3d0a0ea6aa2edf6fb74 Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Thu, 10 Apr 2025 13:33:26 +0000 Subject: [PATCH 37/54] [Release] Release v0.62.0 ## Release v0.62.0 ### Internal Changes * Remove `TestAccDashboards` as legacy dashboard creation on has been discontinued ([#1205](https://github.com/databricks/databricks-sdk-go/pull/1205)). 
--- .release_metadata.json | 2 +- CHANGELOG.md | 6 ++++++ NEXT_CHANGELOG.md | 3 +-- version/version.go | 2 +- 4 files changed, 9 insertions(+), 4 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 6c56a949d..73ae17128 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-03-26 11:59:09+0000" + "timestamp": "2025-04-10 13:33:22+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 360ea7fc4..6e1babf14 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,11 @@ # Version changelog +## Release v0.62.0 + +### Internal Changes +* Remove `TestAccDashboards` as legacy dashboard creation on has been discontinued ([#1205](https://github.com/databricks/databricks-sdk-go/pull/1205)). + + ## Release v0.61.0 ### New Features and Improvements diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index ce727a443..2fdb85ae0 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,6 +1,6 @@ # NEXT CHANGELOG -## Release v0.62.0 +## Release v0.63.0 ### New Features and Improvements @@ -9,6 +9,5 @@ ### Documentation ### Internal Changes -* Remove `TestAccDashboards` as legacy dashboard creation on has been discontinued ([#1205](https://github.com/databricks/databricks-sdk-go/pull/1205)). ### API Changes diff --git a/version/version.go b/version/version.go index 47cad98d6..20a1e06cc 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.61.0" +const Version = "0.62.0" From c99ac650147f81274699a269af73d2c2eac735f7 Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Mon, 14 Apr 2025 16:18:45 +0200 Subject: [PATCH 38/54] [Internal] Update SDK to latest OpenAPI spec (#1207) ## What changes are proposed in this pull request? Update SDK to latest OpenAPI spec ## How is this tested? 
N/A --------- Co-authored-by: Parth Bansal --- .codegen/_openapi_sha | 2 +- NEXT_CHANGELOG.md | 26 ++ experimental/mocks/mock_workspace_client.go | 33 ++ .../dashboards/mock_genie_interface.go | 122 ++++++ .../mock_lakeview_embedded_interface.go | 118 ++++++ .../service/ml/mock_experiments_interface.go | 236 +++++++++++ .../pipelines/mock_pipelines_interface.go | 61 --- .../mock_enable_export_notebook_interface.go | 154 +++++++ ...able_notebook_table_clipboard_interface.go | 154 +++++++ ...ck_enable_results_downloading_interface.go | 154 +++++++ .../settings/mock_settings_interface.go | 141 +++++++ service/billing/model.go | 4 + service/catalog/model.go | 28 +- service/compute/model.go | 17 +- service/dashboards/api.go | 110 ++++- service/dashboards/impl.go | 22 +- service/dashboards/interface.go | 40 +- service/dashboards/model.go | 116 +++++- service/jobs/api.go | 52 ++- service/jobs/interface.go | 24 +- service/jobs/model.go | 381 +++++++++++++++--- service/ml/api.go | 26 ++ service/ml/impl.go | 20 + service/ml/interface.go | 6 + service/ml/model.go | 228 ++++++++--- service/pipelines/api.go | 58 --- service/pkg.go | 15 +- service/serving/api.go | 9 +- service/serving/interface.go | 9 +- service/serving/model.go | 18 +- service/settings/api.go | 158 +++++++- service/settings/impl.go | 78 ++++ service/settings/interface.go | 60 ++- service/settings/model.go | 123 ++++++ service/sql/model.go | 11 +- 35 files changed, 2491 insertions(+), 323 deletions(-) create mode 100644 experimental/mocks/service/settings/mock_enable_export_notebook_interface.go create mode 100644 experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go create mode 100644 experimental/mocks/service/settings/mock_enable_results_downloading_interface.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 2924d5d6d..26ece1bc5 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -31b3fea21dbe5a3a652937691602eb66d6dba30b \ No newline at 
end of file +05692f4dcf168be190bb7bcda725ee8b368b7ae3 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 2fdb85ae0..68b9d1c6e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,3 +11,29 @@ ### Internal Changes ### API Changes +* Added [w.EnableExportNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableExportNotebookAPI) workspace-level service, [w.EnableNotebookTableClipboard](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableNotebookTableClipboardAPI) workspace-level service and [w.EnableResultsDownloading](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableResultsDownloadingAPI) workspace-level service. +* Added `GetCredentialsForTraceDataDownload` and `GetCredentialsForTraceDataUpload` methods for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service. +* Added `GetDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Added `GetPublishedDashboardTokenInfo` method for [w.LakeviewEmbedded](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewEmbeddedAPI) workspace-level service. +* Added `BindingWorkspaceIds` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy). +* Added `DownloadId` field for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). +* Added `DashboardOutput` field for [jobs.RunOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunOutput). 
+* Added `DashboardTask` and `PowerBiTask` fields for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). +* Added `DashboardTask` and `PowerBiTask` fields for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). +* Added `DashboardTask` and `PowerBiTask` fields for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). +* Added `IncludeFeatures` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). +* Added `Models` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs). +* Added `DatasetDigest`, `DatasetName` and `ModelId` fields for [ml.LogMetric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogMetric). +* Added `DatasetDigest`, `DatasetName`, `ModelId` and `RunId` fields for [ml.Metric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Metric). +* Added `ModelInputs` field for [ml.RunInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInputs). +* Added `ClientApplication` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). +* Added `Geography` and `Geometry` enum values for [catalog.ColumnTypeName](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ColumnTypeName). +* Added `AllocationTimeoutNoHealthyAndWarmedUpClusters`, `DockerContainerCreationException`, `DockerImageTooLargeForInstanceException` and `DockerInvalidOsException` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* Added `Standard` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). 
+* Added `CanView` enum value for [sql.WarehousePermissionLevel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#WarehousePermissionLevel). +* [Breaking] Changed `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. Method path has changed. +* [Breaking] Changed waiter for [CommandExecutionAPI.Create](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Create). +* [Breaking] Changed waiter for [CommandExecutionAPI.Execute](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Execute). +* [Breaking] Removed `Error`, `Status` and `TransientStatementId` fields for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). +* [Breaking] Removed `Balanced` and `CostOptimized` enum values for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). +* [Breaking] Removed [PipelinesAPI.WaitGetPipelineRunning](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelinesAPI.WaitGetPipelineRunning) method.
diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index b52b79a32..a14f2697e 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -166,6 +166,15 @@ func NewMockWorkspaceClient(t interface { mockdisableLegacyDbfs := settings.NewMockDisableLegacyDbfsInterface(t) mocksettingsAPI.On("DisableLegacyDbfs").Return(mockdisableLegacyDbfs).Maybe() + mockenableExportNotebook := settings.NewMockEnableExportNotebookInterface(t) + mocksettingsAPI.On("EnableExportNotebook").Return(mockenableExportNotebook).Maybe() + + mockenableNotebookTableClipboard := settings.NewMockEnableNotebookTableClipboardInterface(t) + mocksettingsAPI.On("EnableNotebookTableClipboard").Return(mockenableNotebookTableClipboard).Maybe() + + mockenableResultsDownloading := settings.NewMockEnableResultsDownloadingInterface(t) + mocksettingsAPI.On("EnableResultsDownloading").Return(mockenableResultsDownloading).Maybe() + mockenhancedSecurityMonitoring := settings.NewMockEnhancedSecurityMonitoringInterface(t) mocksettingsAPI.On("EnhancedSecurityMonitoring").Return(mockenhancedSecurityMonitoring).Maybe() @@ -231,6 +240,30 @@ func (m *MockWorkspaceClient) GetMockDisableLegacyDbfsAPI() *settings.MockDisabl return api } +func (m *MockWorkspaceClient) GetMockEnableExportNotebookAPI() *settings.MockEnableExportNotebookInterface { + api, ok := m.GetMockSettingsAPI().EnableExportNotebook().(*settings.MockEnableExportNotebookInterface) + if !ok { + panic(fmt.Sprintf("expected EnableExportNotebook to be *settings.MockEnableExportNotebookInterface, actual was %T", m.GetMockSettingsAPI().EnableExportNotebook())) + } + return api +} + +func (m *MockWorkspaceClient) GetMockEnableNotebookTableClipboardAPI() *settings.MockEnableNotebookTableClipboardInterface { + api, ok := m.GetMockSettingsAPI().EnableNotebookTableClipboard().(*settings.MockEnableNotebookTableClipboardInterface) + if !ok { + panic(fmt.Sprintf("expected 
EnableNotebookTableClipboard to be *settings.MockEnableNotebookTableClipboardInterface, actual was %T", m.GetMockSettingsAPI().EnableNotebookTableClipboard())) + } + return api +} + +func (m *MockWorkspaceClient) GetMockEnableResultsDownloadingAPI() *settings.MockEnableResultsDownloadingInterface { + api, ok := m.GetMockSettingsAPI().EnableResultsDownloading().(*settings.MockEnableResultsDownloadingInterface) + if !ok { + panic(fmt.Sprintf("expected EnableResultsDownloading to be *settings.MockEnableResultsDownloadingInterface, actual was %T", m.GetMockSettingsAPI().EnableResultsDownloading())) + } + return api +} + func (m *MockWorkspaceClient) GetMockEnhancedSecurityMonitoringAPI() *settings.MockEnhancedSecurityMonitoringInterface { api, ok := m.GetMockSettingsAPI().EnhancedSecurityMonitoring().(*settings.MockEnhancedSecurityMonitoringInterface) if !ok { diff --git a/experimental/mocks/service/dashboards/mock_genie_interface.go b/experimental/mocks/service/dashboards/mock_genie_interface.go index c4ac32fd4..8803a3ca0 100644 --- a/experimental/mocks/service/dashboards/mock_genie_interface.go +++ b/experimental/mocks/service/dashboards/mock_genie_interface.go @@ -336,6 +336,128 @@ func (_c *MockGenieInterface_GenerateDownloadFullQueryResult_Call) RunAndReturn( return _c } +// GetDownloadFullQueryResult provides a mock function with given fields: ctx, request +func (_m *MockGenieInterface) GetDownloadFullQueryResult(ctx context.Context, request dashboards.GenieGetDownloadFullQueryResultRequest) (*dashboards.GenieGetDownloadFullQueryResultResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetDownloadFullQueryResult") + } + + var r0 *dashboards.GenieGetDownloadFullQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) (*dashboards.GenieGetDownloadFullQueryResultResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok 
:= ret.Get(0).(func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) *dashboards.GenieGetDownloadFullQueryResultResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieGetDownloadFullQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetDownloadFullQueryResult_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetDownloadFullQueryResult' +type MockGenieInterface_GetDownloadFullQueryResult_Call struct { + *mock.Call +} + +// GetDownloadFullQueryResult is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GenieGetDownloadFullQueryResultRequest +func (_e *MockGenieInterface_Expecter) GetDownloadFullQueryResult(ctx interface{}, request interface{}) *MockGenieInterface_GetDownloadFullQueryResult_Call { + return &MockGenieInterface_GetDownloadFullQueryResult_Call{Call: _e.mock.On("GetDownloadFullQueryResult", ctx, request)} +} + +func (_c *MockGenieInterface_GetDownloadFullQueryResult_Call) Run(run func(ctx context.Context, request dashboards.GenieGetDownloadFullQueryResultRequest)) *MockGenieInterface_GetDownloadFullQueryResult_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GenieGetDownloadFullQueryResultRequest)) + }) + return _c +} + +func (_c *MockGenieInterface_GetDownloadFullQueryResult_Call) Return(_a0 *dashboards.GenieGetDownloadFullQueryResultResponse, _a1 error) *MockGenieInterface_GetDownloadFullQueryResult_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetDownloadFullQueryResult_Call) RunAndReturn(run func(context.Context, dashboards.GenieGetDownloadFullQueryResultRequest) (*dashboards.GenieGetDownloadFullQueryResultResponse, 
error)) *MockGenieInterface_GetDownloadFullQueryResult_Call { + _c.Call.Return(run) + return _c +} + +// GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId provides a mock function with given fields: ctx, spaceId, conversationId, messageId, attachmentId, downloadId +func (_m *MockGenieInterface) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*dashboards.GenieGetDownloadFullQueryResultResponse, error) { + ret := _m.Called(ctx, spaceId, conversationId, messageId, attachmentId, downloadId) + + if len(ret) == 0 { + panic("no return value specified for GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId") + } + + var r0 *dashboards.GenieGetDownloadFullQueryResultResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) (*dashboards.GenieGetDownloadFullQueryResultResponse, error)); ok { + return rf(ctx, spaceId, conversationId, messageId, attachmentId, downloadId) + } + if rf, ok := ret.Get(0).(func(context.Context, string, string, string, string, string) *dashboards.GenieGetDownloadFullQueryResultResponse); ok { + r0 = rf(ctx, spaceId, conversationId, messageId, attachmentId, downloadId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GenieGetDownloadFullQueryResultResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string, string, string, string, string) error); ok { + r1 = rf(ctx, spaceId, conversationId, messageId, attachmentId, downloadId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId' +type MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call struct { + *mock.Call +} + +// GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId is a helper method to define mock.On call +// - ctx context.Context +// - spaceId string +// - conversationId string +// - messageId string +// - attachmentId string +// - downloadId string +func (_e *MockGenieInterface_Expecter) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx interface{}, spaceId interface{}, conversationId interface{}, messageId interface{}, attachmentId interface{}, downloadId interface{}) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call { + return &MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call{Call: _e.mock.On("GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId", ctx, spaceId, conversationId, messageId, attachmentId, downloadId)} +} + +func (_c *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call) Run(run func(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string)) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string), args[2].(string), args[3].(string), args[4].(string), args[5].(string)) + }) + return _c +} + +func (_c *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call) Return(_a0 *dashboards.GenieGetDownloadFullQueryResultResponse, 
_a1 error) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call) RunAndReturn(run func(context.Context, string, string, string, string, string) (*dashboards.GenieGetDownloadFullQueryResultResponse, error)) *MockGenieInterface_GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId_Call { + _c.Call.Return(run) + return _c +} + // GetMessage provides a mock function with given fields: ctx, request func (_m *MockGenieInterface) GetMessage(ctx context.Context, request dashboards.GenieGetConversationMessageRequest) (*dashboards.GenieMessage, error) { ret := _m.Called(ctx, request) diff --git a/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go index 4e2cc2b04..51479c6ef 100644 --- a/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go +++ b/experimental/mocks/service/dashboards/mock_lakeview_embedded_interface.go @@ -116,6 +116,124 @@ func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardEmbeddedByDashboard return _c } +// GetPublishedDashboardTokenInfo provides a mock function with given fields: ctx, request +func (_m *MockLakeviewEmbeddedInterface) GetPublishedDashboardTokenInfo(ctx context.Context, request dashboards.GetPublishedDashboardTokenInfoRequest) (*dashboards.GetPublishedDashboardTokenInfoResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetPublishedDashboardTokenInfo") + } + + var r0 *dashboards.GetPublishedDashboardTokenInfoResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) (*dashboards.GetPublishedDashboardTokenInfoResponse, 
error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) *dashboards.GetPublishedDashboardTokenInfoResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GetPublishedDashboardTokenInfoResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPublishedDashboardTokenInfo' +type MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call struct { + *mock.Call +} + +// GetPublishedDashboardTokenInfo is a helper method to define mock.On call +// - ctx context.Context +// - request dashboards.GetPublishedDashboardTokenInfoRequest +func (_e *MockLakeviewEmbeddedInterface_Expecter) GetPublishedDashboardTokenInfo(ctx interface{}, request interface{}) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call { + return &MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call{Call: _e.mock.On("GetPublishedDashboardTokenInfo", ctx, request)} +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call) Run(run func(ctx context.Context, request dashboards.GetPublishedDashboardTokenInfoRequest)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(dashboards.GetPublishedDashboardTokenInfoRequest)) + }) + return _c +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call) Return(_a0 *dashboards.GetPublishedDashboardTokenInfoResponse, _a1 error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call) RunAndReturn(run func(context.Context, dashboards.GetPublishedDashboardTokenInfoRequest) (*dashboards.GetPublishedDashboardTokenInfoResponse, error)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfo_Call { + _c.Call.Return(run) + return _c +} + +// GetPublishedDashboardTokenInfoByDashboardId provides a mock function with given fields: ctx, dashboardId +func (_m *MockLakeviewEmbeddedInterface) GetPublishedDashboardTokenInfoByDashboardId(ctx context.Context, dashboardId string) (*dashboards.GetPublishedDashboardTokenInfoResponse, error) { + ret := _m.Called(ctx, dashboardId) + + if len(ret) == 0 { + panic("no return value specified for GetPublishedDashboardTokenInfoByDashboardId") + } + + var r0 *dashboards.GetPublishedDashboardTokenInfoResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*dashboards.GetPublishedDashboardTokenInfoResponse, error)); ok { + return rf(ctx, dashboardId) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *dashboards.GetPublishedDashboardTokenInfoResponse); ok { + r0 = rf(ctx, dashboardId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*dashboards.GetPublishedDashboardTokenInfoResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, dashboardId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetPublishedDashboardTokenInfoByDashboardId' +type MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call struct { + *mock.Call +} + +// GetPublishedDashboardTokenInfoByDashboardId is a helper method to define mock.On call +// - ctx context.Context +// - dashboardId string +func (_e *MockLakeviewEmbeddedInterface_Expecter) GetPublishedDashboardTokenInfoByDashboardId(ctx 
interface{}, dashboardId interface{}) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call { + return &MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call{Call: _e.mock.On("GetPublishedDashboardTokenInfoByDashboardId", ctx, dashboardId)} +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call) Run(run func(ctx context.Context, dashboardId string)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call) Return(_a0 *dashboards.GetPublishedDashboardTokenInfoResponse, _a1 error) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call) RunAndReturn(run func(context.Context, string) (*dashboards.GetPublishedDashboardTokenInfoResponse, error)) *MockLakeviewEmbeddedInterface_GetPublishedDashboardTokenInfoByDashboardId_Call { + _c.Call.Return(run) + return _c +} + // NewMockLakeviewEmbeddedInterface creates a new instance of MockLakeviewEmbeddedInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewMockLakeviewEmbeddedInterface(t interface { diff --git a/experimental/mocks/service/ml/mock_experiments_interface.go b/experimental/mocks/service/ml/mock_experiments_interface.go index e13c8f47f..7a7226309 100644 --- a/experimental/mocks/service/ml/mock_experiments_interface.go +++ b/experimental/mocks/service/ml/mock_experiments_interface.go @@ -401,6 +401,242 @@ func (_c *MockExperimentsInterface_GetByName_Call) RunAndReturn(run func(context return _c } +// GetCredentialsForTraceDataDownload provides a mock function with given fields: ctx, request +func (_m *MockExperimentsInterface) GetCredentialsForTraceDataDownload(ctx context.Context, request ml.GetCredentialsForTraceDataDownloadRequest) (*ml.GetCredentialsForTraceDataDownloadResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetCredentialsForTraceDataDownload") + } + + var r0 *ml.GetCredentialsForTraceDataDownloadResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) (*ml.GetCredentialsForTraceDataDownloadResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) *ml.GetCredentialsForTraceDataDownloadResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataDownloadResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataDownload' +type MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call struct { + *mock.Call +} + +// GetCredentialsForTraceDataDownload is a helper method to define 
mock.On call +// - ctx context.Context +// - request ml.GetCredentialsForTraceDataDownloadRequest +func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataDownload(ctx interface{}, request interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call { + return &MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call{Call: _e.mock.On("GetCredentialsForTraceDataDownload", ctx, request)} +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call) Run(run func(ctx context.Context, request ml.GetCredentialsForTraceDataDownloadRequest)) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ml.GetCredentialsForTraceDataDownloadRequest)) + }) + return _c +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call) Return(_a0 *ml.GetCredentialsForTraceDataDownloadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call) RunAndReturn(run func(context.Context, ml.GetCredentialsForTraceDataDownloadRequest) (*ml.GetCredentialsForTraceDataDownloadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataDownload_Call { + _c.Call.Return(run) + return _c +} + +// GetCredentialsForTraceDataDownloadByRequestId provides a mock function with given fields: ctx, requestId +func (_m *MockExperimentsInterface) GetCredentialsForTraceDataDownloadByRequestId(ctx context.Context, requestId string) (*ml.GetCredentialsForTraceDataDownloadResponse, error) { + ret := _m.Called(ctx, requestId) + + if len(ret) == 0 { + panic("no return value specified for GetCredentialsForTraceDataDownloadByRequestId") + } + + var r0 *ml.GetCredentialsForTraceDataDownloadResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) 
(*ml.GetCredentialsForTraceDataDownloadResponse, error)); ok { + return rf(ctx, requestId) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *ml.GetCredentialsForTraceDataDownloadResponse); ok { + r0 = rf(ctx, requestId) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataDownloadResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, requestId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataDownloadByRequestId' +type MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call struct { + *mock.Call +} + +// GetCredentialsForTraceDataDownloadByRequestId is a helper method to define mock.On call +// - ctx context.Context +// - requestId string +func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataDownloadByRequestId(ctx interface{}, requestId interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call { + return &MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call{Call: _e.mock.On("GetCredentialsForTraceDataDownloadByRequestId", ctx, requestId)} +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call) Run(run func(ctx context.Context, requestId string)) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call) Return(_a0 *ml.GetCredentialsForTraceDataDownloadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c 
*MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call) RunAndReturn(run func(context.Context, string) (*ml.GetCredentialsForTraceDataDownloadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataDownloadByRequestId_Call { + _c.Call.Return(run) + return _c +} + +// GetCredentialsForTraceDataUpload provides a mock function with given fields: ctx, request +func (_m *MockExperimentsInterface) GetCredentialsForTraceDataUpload(ctx context.Context, request ml.GetCredentialsForTraceDataUploadRequest) (*ml.GetCredentialsForTraceDataUploadResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetCredentialsForTraceDataUpload") + } + + var r0 *ml.GetCredentialsForTraceDataUploadResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) (*ml.GetCredentialsForTraceDataUploadResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) *ml.GetCredentialsForTraceDataUploadResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataUploadResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataUpload' +type MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call struct { + *mock.Call +} + +// GetCredentialsForTraceDataUpload is a helper method to define mock.On call +// - ctx context.Context +// - request ml.GetCredentialsForTraceDataUploadRequest +func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataUpload(ctx interface{}, request 
interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call { + return &MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call{Call: _e.mock.On("GetCredentialsForTraceDataUpload", ctx, request)} +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call) Run(run func(ctx context.Context, request ml.GetCredentialsForTraceDataUploadRequest)) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(ml.GetCredentialsForTraceDataUploadRequest)) + }) + return _c +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call) Return(_a0 *ml.GetCredentialsForTraceDataUploadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call) RunAndReturn(run func(context.Context, ml.GetCredentialsForTraceDataUploadRequest) (*ml.GetCredentialsForTraceDataUploadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataUpload_Call { + _c.Call.Return(run) + return _c +} + +// GetCredentialsForTraceDataUploadByRequestId provides a mock function with given fields: ctx, requestId +func (_m *MockExperimentsInterface) GetCredentialsForTraceDataUploadByRequestId(ctx context.Context, requestId string) (*ml.GetCredentialsForTraceDataUploadResponse, error) { + ret := _m.Called(ctx, requestId) + + if len(ret) == 0 { + panic("no return value specified for GetCredentialsForTraceDataUploadByRequestId") + } + + var r0 *ml.GetCredentialsForTraceDataUploadResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*ml.GetCredentialsForTraceDataUploadResponse, error)); ok { + return rf(ctx, requestId) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *ml.GetCredentialsForTraceDataUploadResponse); ok { + r0 = rf(ctx, requestId) + } else { + if ret.Get(0) != nil { + 
r0 = ret.Get(0).(*ml.GetCredentialsForTraceDataUploadResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, requestId) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetCredentialsForTraceDataUploadByRequestId' +type MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call struct { + *mock.Call +} + +// GetCredentialsForTraceDataUploadByRequestId is a helper method to define mock.On call +// - ctx context.Context +// - requestId string +func (_e *MockExperimentsInterface_Expecter) GetCredentialsForTraceDataUploadByRequestId(ctx interface{}, requestId interface{}) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call { + return &MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call{Call: _e.mock.On("GetCredentialsForTraceDataUploadByRequestId", ctx, requestId)} +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call) Run(run func(ctx context.Context, requestId string)) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call) Return(_a0 *ml.GetCredentialsForTraceDataUploadResponse, _a1 error) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call) RunAndReturn(run func(context.Context, string) (*ml.GetCredentialsForTraceDataUploadResponse, error)) *MockExperimentsInterface_GetCredentialsForTraceDataUploadByRequestId_Call { + _c.Call.Return(run) + return _c +} + // GetExperiment provides a 
mock function with given fields: ctx, request func (_m *MockExperimentsInterface) GetExperiment(ctx context.Context, request ml.GetExperimentRequest) (*ml.GetExperimentResponse, error) { ret := _m.Called(ctx, request) diff --git a/experimental/mocks/service/pipelines/mock_pipelines_interface.go b/experimental/mocks/service/pipelines/mock_pipelines_interface.go index db41cbca0..0ac64dd7a 100644 --- a/experimental/mocks/service/pipelines/mock_pipelines_interface.go +++ b/experimental/mocks/service/pipelines/mock_pipelines_interface.go @@ -1583,67 +1583,6 @@ func (_c *MockPipelinesInterface_WaitGetPipelineIdle_Call) RunAndReturn(run func return _c } -// WaitGetPipelineRunning provides a mock function with given fields: ctx, pipelineId, timeout, callback -func (_m *MockPipelinesInterface) WaitGetPipelineRunning(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error) { - ret := _m.Called(ctx, pipelineId, timeout, callback) - - if len(ret) == 0 { - panic("no return value specified for WaitGetPipelineRunning") - } - - var r0 *pipelines.GetPipelineResponse - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error)); ok { - return rf(ctx, pipelineId, timeout, callback) - } - if rf, ok := ret.Get(0).(func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) *pipelines.GetPipelineResponse); ok { - r0 = rf(ctx, pipelineId, timeout, callback) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(*pipelines.GetPipelineResponse) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) error); ok { - r1 = rf(ctx, pipelineId, timeout, callback) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MockPipelinesInterface_WaitGetPipelineRunning_Call is a *mock.Call that shadows Run/Return 
methods with type explicit version for method 'WaitGetPipelineRunning' -type MockPipelinesInterface_WaitGetPipelineRunning_Call struct { - *mock.Call -} - -// WaitGetPipelineRunning is a helper method to define mock.On call -// - ctx context.Context -// - pipelineId string -// - timeout time.Duration -// - callback func(*pipelines.GetPipelineResponse) -func (_e *MockPipelinesInterface_Expecter) WaitGetPipelineRunning(ctx interface{}, pipelineId interface{}, timeout interface{}, callback interface{}) *MockPipelinesInterface_WaitGetPipelineRunning_Call { - return &MockPipelinesInterface_WaitGetPipelineRunning_Call{Call: _e.mock.On("WaitGetPipelineRunning", ctx, pipelineId, timeout, callback)} -} - -func (_c *MockPipelinesInterface_WaitGetPipelineRunning_Call) Run(run func(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*pipelines.GetPipelineResponse))) *MockPipelinesInterface_WaitGetPipelineRunning_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(string), args[2].(time.Duration), args[3].(func(*pipelines.GetPipelineResponse))) - }) - return _c -} - -func (_c *MockPipelinesInterface_WaitGetPipelineRunning_Call) Return(_a0 *pipelines.GetPipelineResponse, _a1 error) *MockPipelinesInterface_WaitGetPipelineRunning_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MockPipelinesInterface_WaitGetPipelineRunning_Call) RunAndReturn(run func(context.Context, string, time.Duration, func(*pipelines.GetPipelineResponse)) (*pipelines.GetPipelineResponse, error)) *MockPipelinesInterface_WaitGetPipelineRunning_Call { - _c.Call.Return(run) - return _c -} - // NewMockPipelinesInterface creates a new instance of MockPipelinesInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewMockPipelinesInterface(t interface { diff --git a/experimental/mocks/service/settings/mock_enable_export_notebook_interface.go b/experimental/mocks/service/settings/mock_enable_export_notebook_interface.go new file mode 100644 index 000000000..29cabb26b --- /dev/null +++ b/experimental/mocks/service/settings/mock_enable_export_notebook_interface.go @@ -0,0 +1,154 @@ +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package settings + +import ( + context "context" + + settings "github.com/databricks/databricks-sdk-go/service/settings" + mock "github.com/stretchr/testify/mock" +) + +// MockEnableExportNotebookInterface is an autogenerated mock type for the EnableExportNotebookInterface type +type MockEnableExportNotebookInterface struct { + mock.Mock +} + +type MockEnableExportNotebookInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockEnableExportNotebookInterface) EXPECT() *MockEnableExportNotebookInterface_Expecter { + return &MockEnableExportNotebookInterface_Expecter{mock: &_m.Mock} +} + +// GetEnableExportNotebook provides a mock function with given fields: ctx +func (_m *MockEnableExportNotebookInterface) GetEnableExportNotebook(ctx context.Context) (*settings.EnableExportNotebook, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetEnableExportNotebook") + } + + var r0 *settings.EnableExportNotebook + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*settings.EnableExportNotebook, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *settings.EnableExportNotebook); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.EnableExportNotebook) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableExportNotebookInterface_GetEnableExportNotebook_Call is a *mock.Call that shadows Run/Return methods with type explicit 
version for method 'GetEnableExportNotebook' +type MockEnableExportNotebookInterface_GetEnableExportNotebook_Call struct { + *mock.Call +} + +// GetEnableExportNotebook is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockEnableExportNotebookInterface_Expecter) GetEnableExportNotebook(ctx interface{}) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call { + return &MockEnableExportNotebookInterface_GetEnableExportNotebook_Call{Call: _e.mock.On("GetEnableExportNotebook", ctx)} +} + +func (_c *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call) Run(run func(ctx context.Context)) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call) Return(_a0 *settings.EnableExportNotebook, _a1 error) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call) RunAndReturn(run func(context.Context) (*settings.EnableExportNotebook, error)) *MockEnableExportNotebookInterface_GetEnableExportNotebook_Call { + _c.Call.Return(run) + return _c +} + +// PatchEnableExportNotebook provides a mock function with given fields: ctx, request +func (_m *MockEnableExportNotebookInterface) PatchEnableExportNotebook(ctx context.Context, request settings.UpdateEnableExportNotebookRequest) (*settings.EnableExportNotebook, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for PatchEnableExportNotebook") + } + + var r0 *settings.EnableExportNotebook + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableExportNotebookRequest) (*settings.EnableExportNotebook, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, 
settings.UpdateEnableExportNotebookRequest) *settings.EnableExportNotebook); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.EnableExportNotebook) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateEnableExportNotebookRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchEnableExportNotebook' +type MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call struct { + *mock.Call +} + +// PatchEnableExportNotebook is a helper method to define mock.On call +// - ctx context.Context +// - request settings.UpdateEnableExportNotebookRequest +func (_e *MockEnableExportNotebookInterface_Expecter) PatchEnableExportNotebook(ctx interface{}, request interface{}) *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call { + return &MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call{Call: _e.mock.On("PatchEnableExportNotebook", ctx, request)} +} + +func (_c *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call) Run(run func(ctx context.Context, request settings.UpdateEnableExportNotebookRequest)) *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateEnableExportNotebookRequest)) + }) + return _c +} + +func (_c *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call) Return(_a0 *settings.EnableExportNotebook, _a1 error) *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call) RunAndReturn(run func(context.Context, settings.UpdateEnableExportNotebookRequest) (*settings.EnableExportNotebook, error)) 
*MockEnableExportNotebookInterface_PatchEnableExportNotebook_Call { + _c.Call.Return(run) + return _c +} + +// NewMockEnableExportNotebookInterface creates a new instance of MockEnableExportNotebookInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockEnableExportNotebookInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockEnableExportNotebookInterface { + mock := &MockEnableExportNotebookInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go b/experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go new file mode 100644 index 000000000..9d4b917e2 --- /dev/null +++ b/experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go @@ -0,0 +1,154 @@ +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package settings + +import ( + context "context" + + settings "github.com/databricks/databricks-sdk-go/service/settings" + mock "github.com/stretchr/testify/mock" +) + +// MockEnableNotebookTableClipboardInterface is an autogenerated mock type for the EnableNotebookTableClipboardInterface type +type MockEnableNotebookTableClipboardInterface struct { + mock.Mock +} + +type MockEnableNotebookTableClipboardInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockEnableNotebookTableClipboardInterface) EXPECT() *MockEnableNotebookTableClipboardInterface_Expecter { + return &MockEnableNotebookTableClipboardInterface_Expecter{mock: &_m.Mock} +} + +// GetEnableNotebookTableClipboard provides a mock function with given fields: ctx +func (_m *MockEnableNotebookTableClipboardInterface) GetEnableNotebookTableClipboard(ctx context.Context) (*settings.EnableNotebookTableClipboard, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetEnableNotebookTableClipboard") + } + + var r0 *settings.EnableNotebookTableClipboard + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*settings.EnableNotebookTableClipboard, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *settings.EnableNotebookTableClipboard); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.EnableNotebookTableClipboard) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnableNotebookTableClipboard' +type MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call struct { + *mock.Call +} + +// GetEnableNotebookTableClipboard is a helper method to define mock.On call +// - ctx context.Context +func (_e 
*MockEnableNotebookTableClipboardInterface_Expecter) GetEnableNotebookTableClipboard(ctx interface{}) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call { + return &MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call{Call: _e.mock.On("GetEnableNotebookTableClipboard", ctx)} +} + +func (_c *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call) Run(run func(ctx context.Context)) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call) Return(_a0 *settings.EnableNotebookTableClipboard, _a1 error) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call) RunAndReturn(run func(context.Context) (*settings.EnableNotebookTableClipboard, error)) *MockEnableNotebookTableClipboardInterface_GetEnableNotebookTableClipboard_Call { + _c.Call.Return(run) + return _c +} + +// PatchEnableNotebookTableClipboard provides a mock function with given fields: ctx, request +func (_m *MockEnableNotebookTableClipboardInterface) PatchEnableNotebookTableClipboard(ctx context.Context, request settings.UpdateEnableNotebookTableClipboardRequest) (*settings.EnableNotebookTableClipboard, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for PatchEnableNotebookTableClipboard") + } + + var r0 *settings.EnableNotebookTableClipboard + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableNotebookTableClipboardRequest) (*settings.EnableNotebookTableClipboard, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, 
settings.UpdateEnableNotebookTableClipboardRequest) *settings.EnableNotebookTableClipboard); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.EnableNotebookTableClipboard) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateEnableNotebookTableClipboardRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchEnableNotebookTableClipboard' +type MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call struct { + *mock.Call +} + +// PatchEnableNotebookTableClipboard is a helper method to define mock.On call +// - ctx context.Context +// - request settings.UpdateEnableNotebookTableClipboardRequest +func (_e *MockEnableNotebookTableClipboardInterface_Expecter) PatchEnableNotebookTableClipboard(ctx interface{}, request interface{}) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call { + return &MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call{Call: _e.mock.On("PatchEnableNotebookTableClipboard", ctx, request)} +} + +func (_c *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call) Run(run func(ctx context.Context, request settings.UpdateEnableNotebookTableClipboardRequest)) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateEnableNotebookTableClipboardRequest)) + }) + return _c +} + +func (_c *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call) Return(_a0 *settings.EnableNotebookTableClipboard, _a1 error) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call { + _c.Call.Return(_a0, _a1) + 
return _c +} + +func (_c *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call) RunAndReturn(run func(context.Context, settings.UpdateEnableNotebookTableClipboardRequest) (*settings.EnableNotebookTableClipboard, error)) *MockEnableNotebookTableClipboardInterface_PatchEnableNotebookTableClipboard_Call { + _c.Call.Return(run) + return _c +} + +// NewMockEnableNotebookTableClipboardInterface creates a new instance of MockEnableNotebookTableClipboardInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockEnableNotebookTableClipboardInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockEnableNotebookTableClipboardInterface { + mock := &MockEnableNotebookTableClipboardInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/settings/mock_enable_results_downloading_interface.go b/experimental/mocks/service/settings/mock_enable_results_downloading_interface.go new file mode 100644 index 000000000..e25b79736 --- /dev/null +++ b/experimental/mocks/service/settings/mock_enable_results_downloading_interface.go @@ -0,0 +1,154 @@ +// Code generated by mockery v2.53.2. DO NOT EDIT. 
+ +package settings + +import ( + context "context" + + settings "github.com/databricks/databricks-sdk-go/service/settings" + mock "github.com/stretchr/testify/mock" +) + +// MockEnableResultsDownloadingInterface is an autogenerated mock type for the EnableResultsDownloadingInterface type +type MockEnableResultsDownloadingInterface struct { + mock.Mock +} + +type MockEnableResultsDownloadingInterface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockEnableResultsDownloadingInterface) EXPECT() *MockEnableResultsDownloadingInterface_Expecter { + return &MockEnableResultsDownloadingInterface_Expecter{mock: &_m.Mock} +} + +// GetEnableResultsDownloading provides a mock function with given fields: ctx +func (_m *MockEnableResultsDownloadingInterface) GetEnableResultsDownloading(ctx context.Context) (*settings.EnableResultsDownloading, error) { + ret := _m.Called(ctx) + + if len(ret) == 0 { + panic("no return value specified for GetEnableResultsDownloading") + } + + var r0 *settings.EnableResultsDownloading + var r1 error + if rf, ok := ret.Get(0).(func(context.Context) (*settings.EnableResultsDownloading, error)); ok { + return rf(ctx) + } + if rf, ok := ret.Get(0).(func(context.Context) *settings.EnableResultsDownloading); ok { + r0 = rf(ctx) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.EnableResultsDownloading) + } + } + + if rf, ok := ret.Get(1).(func(context.Context) error); ok { + r1 = rf(ctx) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'GetEnableResultsDownloading' +type MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call struct { + *mock.Call +} + +// GetEnableResultsDownloading is a helper method to define mock.On call +// - ctx context.Context +func (_e *MockEnableResultsDownloadingInterface_Expecter) GetEnableResultsDownloading(ctx interface{}) 
*MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call { + return &MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call{Call: _e.mock.On("GetEnableResultsDownloading", ctx)} +} + +func (_c *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call) Run(run func(ctx context.Context)) *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context)) + }) + return _c +} + +func (_c *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call) Return(_a0 *settings.EnableResultsDownloading, _a1 error) *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call) RunAndReturn(run func(context.Context) (*settings.EnableResultsDownloading, error)) *MockEnableResultsDownloadingInterface_GetEnableResultsDownloading_Call { + _c.Call.Return(run) + return _c +} + +// PatchEnableResultsDownloading provides a mock function with given fields: ctx, request +func (_m *MockEnableResultsDownloadingInterface) PatchEnableResultsDownloading(ctx context.Context, request settings.UpdateEnableResultsDownloadingRequest) (*settings.EnableResultsDownloading, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for PatchEnableResultsDownloading") + } + + var r0 *settings.EnableResultsDownloading + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableResultsDownloadingRequest) (*settings.EnableResultsDownloading, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateEnableResultsDownloadingRequest) *settings.EnableResultsDownloading); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.EnableResultsDownloading) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, settings.UpdateEnableResultsDownloadingRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'PatchEnableResultsDownloading' +type MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call struct { + *mock.Call +} + +// PatchEnableResultsDownloading is a helper method to define mock.On call +// - ctx context.Context +// - request settings.UpdateEnableResultsDownloadingRequest +func (_e *MockEnableResultsDownloadingInterface_Expecter) PatchEnableResultsDownloading(ctx interface{}, request interface{}) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call { + return &MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call{Call: _e.mock.On("PatchEnableResultsDownloading", ctx, request)} +} + +func (_c *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call) Run(run func(ctx context.Context, request settings.UpdateEnableResultsDownloadingRequest)) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateEnableResultsDownloadingRequest)) + }) + return _c +} + +func (_c *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call) Return(_a0 *settings.EnableResultsDownloading, _a1 error) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call) RunAndReturn(run func(context.Context, settings.UpdateEnableResultsDownloadingRequest) (*settings.EnableResultsDownloading, error)) *MockEnableResultsDownloadingInterface_PatchEnableResultsDownloading_Call { + _c.Call.Return(run) + return _c +} + 
+// NewMockEnableResultsDownloadingInterface creates a new instance of MockEnableResultsDownloadingInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. +func NewMockEnableResultsDownloadingInterface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockEnableResultsDownloadingInterface { + mock := &MockEnableResultsDownloadingInterface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/experimental/mocks/service/settings/mock_settings_interface.go b/experimental/mocks/service/settings/mock_settings_interface.go index 73f937d27..01dd451ca 100644 --- a/experimental/mocks/service/settings/mock_settings_interface.go +++ b/experimental/mocks/service/settings/mock_settings_interface.go @@ -349,6 +349,147 @@ func (_c *MockSettingsInterface_DisableLegacyDbfs_Call) RunAndReturn(run func() return _c } +// EnableExportNotebook provides a mock function with no fields +func (_m *MockSettingsInterface) EnableExportNotebook() settings.EnableExportNotebookInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnableExportNotebook") + } + + var r0 settings.EnableExportNotebookInterface + if rf, ok := ret.Get(0).(func() settings.EnableExportNotebookInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.EnableExportNotebookInterface) + } + } + + return r0 +} + +// MockSettingsInterface_EnableExportNotebook_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableExportNotebook' +type MockSettingsInterface_EnableExportNotebook_Call struct { + *mock.Call +} + +// EnableExportNotebook is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) EnableExportNotebook() *MockSettingsInterface_EnableExportNotebook_Call { + return 
&MockSettingsInterface_EnableExportNotebook_Call{Call: _e.mock.On("EnableExportNotebook")} +} + +func (_c *MockSettingsInterface_EnableExportNotebook_Call) Run(run func()) *MockSettingsInterface_EnableExportNotebook_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_EnableExportNotebook_Call) Return(_a0 settings.EnableExportNotebookInterface) *MockSettingsInterface_EnableExportNotebook_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_EnableExportNotebook_Call) RunAndReturn(run func() settings.EnableExportNotebookInterface) *MockSettingsInterface_EnableExportNotebook_Call { + _c.Call.Return(run) + return _c +} + +// EnableNotebookTableClipboard provides a mock function with no fields +func (_m *MockSettingsInterface) EnableNotebookTableClipboard() settings.EnableNotebookTableClipboardInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnableNotebookTableClipboard") + } + + var r0 settings.EnableNotebookTableClipboardInterface + if rf, ok := ret.Get(0).(func() settings.EnableNotebookTableClipboardInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.EnableNotebookTableClipboardInterface) + } + } + + return r0 +} + +// MockSettingsInterface_EnableNotebookTableClipboard_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableNotebookTableClipboard' +type MockSettingsInterface_EnableNotebookTableClipboard_Call struct { + *mock.Call +} + +// EnableNotebookTableClipboard is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) EnableNotebookTableClipboard() *MockSettingsInterface_EnableNotebookTableClipboard_Call { + return &MockSettingsInterface_EnableNotebookTableClipboard_Call{Call: _e.mock.On("EnableNotebookTableClipboard")} +} + +func (_c *MockSettingsInterface_EnableNotebookTableClipboard_Call) Run(run func()) 
*MockSettingsInterface_EnableNotebookTableClipboard_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_EnableNotebookTableClipboard_Call) Return(_a0 settings.EnableNotebookTableClipboardInterface) *MockSettingsInterface_EnableNotebookTableClipboard_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_EnableNotebookTableClipboard_Call) RunAndReturn(run func() settings.EnableNotebookTableClipboardInterface) *MockSettingsInterface_EnableNotebookTableClipboard_Call { + _c.Call.Return(run) + return _c +} + +// EnableResultsDownloading provides a mock function with no fields +func (_m *MockSettingsInterface) EnableResultsDownloading() settings.EnableResultsDownloadingInterface { + ret := _m.Called() + + if len(ret) == 0 { + panic("no return value specified for EnableResultsDownloading") + } + + var r0 settings.EnableResultsDownloadingInterface + if rf, ok := ret.Get(0).(func() settings.EnableResultsDownloadingInterface); ok { + r0 = rf() + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(settings.EnableResultsDownloadingInterface) + } + } + + return r0 +} + +// MockSettingsInterface_EnableResultsDownloading_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'EnableResultsDownloading' +type MockSettingsInterface_EnableResultsDownloading_Call struct { + *mock.Call +} + +// EnableResultsDownloading is a helper method to define mock.On call +func (_e *MockSettingsInterface_Expecter) EnableResultsDownloading() *MockSettingsInterface_EnableResultsDownloading_Call { + return &MockSettingsInterface_EnableResultsDownloading_Call{Call: _e.mock.On("EnableResultsDownloading")} +} + +func (_c *MockSettingsInterface_EnableResultsDownloading_Call) Run(run func()) *MockSettingsInterface_EnableResultsDownloading_Call { + _c.Call.Run(func(args mock.Arguments) { + run() + }) + return _c +} + +func (_c *MockSettingsInterface_EnableResultsDownloading_Call) 
Return(_a0 settings.EnableResultsDownloadingInterface) *MockSettingsInterface_EnableResultsDownloading_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockSettingsInterface_EnableResultsDownloading_Call) RunAndReturn(run func() settings.EnableResultsDownloadingInterface) *MockSettingsInterface_EnableResultsDownloading_Call { + _c.Call.Return(run) + return _c +} + // EnhancedSecurityMonitoring provides a mock function with no fields func (_m *MockSettingsInterface) EnhancedSecurityMonitoring() settings.EnhancedSecurityMonitoringInterface { ret := _m.Called() diff --git a/service/billing/model.go b/service/billing/model.go index d9199a2e2..3de61625b 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -253,6 +253,10 @@ type BudgetConfigurationFilterWorkspaceIdClause struct { // Contains the BudgetPolicy details. type BudgetPolicy struct { + // List of workspaces that this budget policy will be exclusively bound to. + // An empty binding implies that this budget policy is open to any workspace + // in the account. + BindingWorkspaceIds []int64 `json:"binding_workspace_ids,omitempty"` // A list of tags defined by the customer. At most 20 entries are allowed // per policy. 
CustomTags []compute.CustomPolicyTag `json:"custom_tags,omitempty"` diff --git a/service/catalog/model.go b/service/catalog/model.go index 2658e51f2..9708c90cd 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -549,6 +549,10 @@ const ColumnTypeNameDouble ColumnTypeName = `DOUBLE` const ColumnTypeNameFloat ColumnTypeName = `FLOAT` +const ColumnTypeNameGeography ColumnTypeName = `GEOGRAPHY` + +const ColumnTypeNameGeometry ColumnTypeName = `GEOMETRY` + const ColumnTypeNameInt ColumnTypeName = `INT` const ColumnTypeNameInterval ColumnTypeName = `INTERVAL` @@ -583,11 +587,11 @@ func (f *ColumnTypeName) String() string { // Set raw string value and validate it against allowed values func (f *ColumnTypeName) Set(v string) error { switch v { - case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`: + case `ARRAY`, `BINARY`, `BOOLEAN`, `BYTE`, `CHAR`, `DATE`, `DECIMAL`, `DOUBLE`, `FLOAT`, `GEOGRAPHY`, `GEOMETRY`, `INT`, `INTERVAL`, `LONG`, `MAP`, `NULL`, `SHORT`, `STRING`, `STRUCT`, `TABLE_TYPE`, `TIMESTAMP`, `TIMESTAMP_NTZ`, `USER_DEFINED_TYPE`, `VARIANT`: *f = ColumnTypeName(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE", "VARIANT"`, v) + return fmt.Errorf(`value "%s" is not one of "ARRAY", "BINARY", "BOOLEAN", "BYTE", "CHAR", "DATE", "DECIMAL", "DOUBLE", "FLOAT", "GEOGRAPHY", "GEOMETRY", "INT", "INTERVAL", "LONG", "MAP", "NULL", "SHORT", "STRING", "STRUCT", "TABLE_TYPE", "TIMESTAMP", "TIMESTAMP_NTZ", "USER_DEFINED_TYPE", "VARIANT"`, v) } } @@ -1217,7 +1221,12 @@ type CreateVolumeRequestContent struct { SchemaName string 
`json:"schema_name"` // The storage location on the cloud StorageLocation string `json:"storage_location,omitempty"` - + // The type of the volume. An external volume is located in the specified + // external location. A managed volume is located in the default location + // which is specified by the parent schema, or the parent catalog, or the + // Metastore. [Learn more] + // + // [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external VolumeType VolumeType `json:"volume_type"` ForceSendFields []string `json:"-" url:"-"` @@ -6087,7 +6096,12 @@ type VolumeInfo struct { UpdatedBy string `json:"updated_by,omitempty"` // The unique identifier of the volume VolumeId string `json:"volume_id,omitempty"` - + // The type of the volume. An external volume is located in the specified + // external location. A managed volume is located in the default location + // which is specified by the parent schema, or the parent catalog, or the + // Metastore. [Learn more] + // + // [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external VolumeType VolumeType `json:"volume_type,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -6101,6 +6115,12 @@ func (s VolumeInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The type of the volume. An external volume is located in the specified +// external location. A managed volume is located in the default location which +// is specified by the parent schema, or the parent catalog, or the Metastore. +// [Learn more] +// +// [Learn more]: https://docs.databricks.com/aws/en/volumes/managed-vs-external type VolumeType string const VolumeTypeExternal VolumeType = `EXTERNAL` diff --git a/service/compute/model.go b/service/compute/model.go index 37bb90553..c34ffb688 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2759,6 +2759,9 @@ type Environment struct { // project path>(WSFS or Volumes in Databricks), E.g. 
// dependencies: ["foo==0.0.1", "-r /Workspace/test/requirements.txt"] Dependencies []string `json:"dependencies,omitempty"` + // List of jar dependencies, should be string representing volume paths. For + // example: `/Volumes/path/to/test.jar`. + JarDependencies []string `json:"jar_dependencies,omitempty"` } type EventDetails struct { @@ -4601,6 +4604,8 @@ func (s LogSyncStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type MapAny map[string]any + type MavenLibrary struct { // Gradle-style maven coordinates. For example: "org.jsoup:jsoup:1.7.2". Coordinates string `json:"coordinates"` @@ -5215,6 +5220,8 @@ const TerminationReasonCodeAllocationTimeout TerminationReasonCode = `ALLOCATION const TerminationReasonCodeAllocationTimeoutNodeDaemonNotReady TerminationReasonCode = `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY` +const TerminationReasonCodeAllocationTimeoutNoHealthyAndWarmedUpClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS` + const TerminationReasonCodeAllocationTimeoutNoHealthyClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS` const TerminationReasonCodeAllocationTimeoutNoMatchedClusters TerminationReasonCode = `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS` @@ -5319,8 +5326,14 @@ const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_ const TerminationReasonCodeDisasterRecoveryReplication TerminationReasonCode = `DISASTER_RECOVERY_REPLICATION` +const TerminationReasonCodeDockerContainerCreationException TerminationReasonCode = `DOCKER_CONTAINER_CREATION_EXCEPTION` + const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` +const TerminationReasonCodeDockerImageTooLargeForInstanceException TerminationReasonCode = `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION` + +const TerminationReasonCodeDockerInvalidOsException TerminationReasonCode = `DOCKER_INVALID_OS_EXCEPTION` + const TerminationReasonCodeDriverEviction 
TerminationReasonCode = `DRIVER_EVICTION` const TerminationReasonCodeDriverLaunchTimeout TerminationReasonCode = `DRIVER_LAUNCH_TIMEOUT` @@ -5531,11 +5544,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, 
`CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DOCKER_IMAGE_PULL_FAILURE`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, 
`NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, 
`AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, 
`GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, 
`USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", 
"COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DOCKER_IMAGE_PULL_FAILURE", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", 
"NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", 
"AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", 
"GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", 
"WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } diff --git a/service/dashboards/api.go b/service/dashboards/api.go index b12d265ef..3e6b1ccbf 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -48,11 +48,50 @@ type GenieInterface interface { // Generate full query result download. // - // Initiate full SQL query result download and obtain a transient ID for - // tracking the download progress. This call initiates a new SQL execution to - // generate the query result. + // Initiate full SQL query result download and obtain a `download_id` to track + // the download progress. This call initiates a new SQL execution to generate + // the query result. The result is stored in an external link can be retrieved + // using the [Get Download Full Query + // Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks + // strongly recommends that you protect the URLs that are returned by the + // `EXTERNAL_LINKS` disposition. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) + // Get download full query result. + // + // After [Generating a Full Query Result + // Download](:method:genie/getdownloadfullqueryresult) and successfully + // receiving a `download_id`, use this API to Poll download progress and + // retrieve the SQL query result external link(s) upon completion. Warning: + // Databricks strongly recommends that you protect the URLs that are returned by + // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` + // disposition, a short-lived, presigned URL is generated, which can be used to + // download the results directly from Amazon S3. As a short-lived access + // credential is embedded in this presigned URL, you should protect the URL. 
+ // Because presigned URLs are already generated with embedded temporary access + // credentials, you must not set an Authorization header in the download + // requests. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. + GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) + + // Get download full query result. + // + // After [Generating a Full Query Result + // Download](:method:genie/generatedownloadfullqueryresult) and successfully + // receiving a `download_id`, use this API to poll download progress and + // retrieve the SQL query result external link(s) upon completion. Warning: + // Databricks strongly recommends that you protect the URLs that are returned by + // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` + // disposition, a short-lived, presigned URL is generated, which can be used to + // download the results directly from Amazon S3. As a short-lived access + // credential is embedded in this presigned URL, you should protect the URL. + // Because presigned URLs are already generated with embedded temporary access + // credentials, you must not set an Authorization header in the download + // requests. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. + GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) + + // Get conversation message. // // Get message from conversation. @@ -254,6 +293,31 @@ func (a *GenieAPI) CreateMessageAndWait(ctx context.Context, genieCreateConversa return wait.Get() } +// Get download full query result.
+// +// After [Generating a Full Query Result +// Download](:method:genie/generatedownloadfullqueryresult) and successfully +// receiving a `download_id`, use this API to poll download progress and +// retrieve the SQL query result external link(s) upon completion. Warning: +// Databricks strongly recommends that you protect the URLs that are returned by +// the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` +// disposition, a short-lived, presigned URL is generated, which can be used to +// download the results directly from Amazon S3. As a short-lived access +// credential is embedded in this presigned URL, you should protect the URL. +// Because presigned URLs are already generated with embedded temporary access +// credentials, you must not set an Authorization header in the download +// requests. See [Execute +// Statement](:method:statementexecution/executestatement) for more details. +func (a *GenieAPI) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) { + return a.genieImpl.GetDownloadFullQueryResult(ctx, GenieGetDownloadFullQueryResultRequest{ + SpaceId: spaceId, + ConversationId: conversationId, + MessageId: messageId, + AttachmentId: attachmentId, + DownloadId: downloadId, + }) +} + // Get conversation message. // // Get message from conversation. @@ -606,6 +670,30 @@ type LakeviewEmbeddedInterface interface { // // Get the current published dashboard within an embedded context. GetPublishedDashboardEmbeddedByDashboardId(ctx context.Context, dashboardId string) error + + // Read information of a published dashboard to mint an OAuth token. + // + // Get the required authorization details and scopes of a published dashboard to + // mint an OAuth token. The `authorization_details` can be enriched to apply + // additional restrictions.
+ // + // Example: Adding the following `authorization_details` object to downscope the + // viewer permission to specific table ``` { type: "unity_catalog_privileges", + // privileges: ["SELECT"], object_type: "TABLE", object_full_path: + // "main.default.testdata" } ``` + GetPublishedDashboardTokenInfo(ctx context.Context, request GetPublishedDashboardTokenInfoRequest) (*GetPublishedDashboardTokenInfoResponse, error) + + // Read information of a published dashboard to mint an OAuth token. + // + // Get the required authorization details and scopes of a published dashboard to + // mint an OAuth token. The `authorization_details` can be enriched to apply + // additional restrictions. + // + // Example: Adding the following `authorization_details` object to downscope the + // viewer permission to specific table ``` { type: "unity_catalog_privileges", + // privileges: ["SELECT"], object_type: "TABLE", object_full_path: + // "main.default.testdata" } ``` + GetPublishedDashboardTokenInfoByDashboardId(ctx context.Context, dashboardId string) (*GetPublishedDashboardTokenInfoResponse, error) } func NewLakeviewEmbedded(client *client.DatabricksClient) *LakeviewEmbeddedAPI { @@ -630,6 +718,22 @@ func (a *LakeviewEmbeddedAPI) GetPublishedDashboardEmbeddedByDashboardId(ctx con }) } +// Read information of a published dashboard to mint an OAuth token. +// +// Get the required authorization details and scopes of a published dashboard to +// mint an OAuth token. The `authorization_details` can be enriched to apply +// additional restrictions.
+// +// Example: Adding the following `authorization_details` object to downscope the +// viewer permission to specific table ``` { type: "unity_catalog_privileges", +// privileges: ["SELECT"], object_type: "TABLE", object_full_path: +// "main.default.testdata" } ``` +func (a *LakeviewEmbeddedAPI) GetPublishedDashboardTokenInfoByDashboardId(ctx context.Context, dashboardId string) (*GetPublishedDashboardTokenInfoResponse, error) { + return a.lakeviewEmbeddedImpl.GetPublishedDashboardTokenInfo(ctx, GetPublishedDashboardTokenInfoRequest{ + DashboardId: dashboardId, + }) +} + type QueryExecutionInterface interface { // Cancel the results for the a query for a published, embedded dashboard. diff --git a/service/dashboards/impl.go b/service/dashboards/impl.go index 0fc08d294..da4987e7e 100755 --- a/service/dashboards/impl.go +++ b/service/dashboards/impl.go @@ -50,7 +50,7 @@ func (a *genieImpl) ExecuteMessageQuery(ctx context.Context, request GenieExecut func (a *genieImpl) GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) { var genieGenerateDownloadFullQueryResultResponse GenieGenerateDownloadFullQueryResultResponse - path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/generate-download", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) + path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/downloads", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId) queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" @@ -58,6 +58,16 @@ func (a *genieImpl) GenerateDownloadFullQueryResult(ctx context.Context, request return &genieGenerateDownloadFullQueryResultResponse, err } +func (a *genieImpl) GetDownloadFullQueryResult(ctx context.Context, request 
GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) { + var genieGetDownloadFullQueryResultResponse GenieGetDownloadFullQueryResultResponse + path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v/attachments/%v/downloads/%v", request.SpaceId, request.ConversationId, request.MessageId, request.AttachmentId, request.DownloadId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &genieGetDownloadFullQueryResultResponse) + return &genieGetDownloadFullQueryResultResponse, err +} + func (a *genieImpl) GetMessage(ctx context.Context, request GenieGetConversationMessageRequest) (*GenieMessage, error) { var genieMessage GenieMessage path := fmt.Sprintf("/api/2.0/genie/spaces/%v/conversations/%v/messages/%v", request.SpaceId, request.ConversationId, request.MessageId) @@ -419,6 +429,16 @@ func (a *lakeviewEmbeddedImpl) GetPublishedDashboardEmbedded(ctx context.Context return err } +func (a *lakeviewEmbeddedImpl) GetPublishedDashboardTokenInfo(ctx context.Context, request GetPublishedDashboardTokenInfoRequest) (*GetPublishedDashboardTokenInfoResponse, error) { + var getPublishedDashboardTokenInfoResponse GetPublishedDashboardTokenInfoResponse + path := fmt.Sprintf("/api/2.0/lakeview/dashboards/%v/published/tokeninfo", request.DashboardId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getPublishedDashboardTokenInfoResponse) + return &getPublishedDashboardTokenInfoResponse, err +} + // unexported type that holds implementations of just QueryExecution API methods type queryExecutionImpl struct { client *client.DatabricksClient diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index 983d37125..0df971d5f 
100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -33,11 +33,33 @@ type GenieService interface { // Generate full query result download. // - // Initiate full SQL query result download and obtain a transient ID for - // tracking the download progress. This call initiates a new SQL execution - // to generate the query result. + // Initiate full SQL query result download and obtain a `download_id` to + // track the download progress. This call initiates a new SQL execution to + // generate the query result. The result is stored in an external link and can + // be retrieved using the [Get Download Full Query + // Result](:method:genie/getdownloadfullqueryresult) API. Warning: + // Databricks strongly recommends that you protect the URLs that are + // returned by the `EXTERNAL_LINKS` disposition. See [Execute + // Statement](:method:statementexecution/executestatement) for more details. GenerateDownloadFullQueryResult(ctx context.Context, request GenieGenerateDownloadFullQueryResultRequest) (*GenieGenerateDownloadFullQueryResultResponse, error) + // Get download full query result. + // + // After [Generating a Full Query Result + // Download](:method:genie/generatedownloadfullqueryresult) and successfully + // receiving a `download_id`, use this API to poll download progress and + // retrieve the SQL query result external link(s) upon completion. Warning: + // Databricks strongly recommends that you protect the URLs that are + // returned by the `EXTERNAL_LINKS` disposition. When you use the + // `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, + // which can be used to download the results directly from Amazon S3. As a + // short-lived access credential is embedded in this presigned URL, you + // should protect the URL. Because presigned URLs are already generated with + // embedded temporary access credentials, you must not set an Authorization + // header in the download requests.
See [Execute + // Statement](:method:statementexecution/executestatement) for more details. + GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) + + // Get conversation message. // // Get message from conversation. @@ -164,6 +186,18 @@ type LakeviewEmbeddedService interface { // // Get the current published dashboard within an embedded context. GetPublishedDashboardEmbedded(ctx context.Context, request GetPublishedDashboardEmbeddedRequest) error + + // Read information of a published dashboard to mint an OAuth token. + // + // Get the required authorization details and scopes of a published dashboard + // to mint an OAuth token. The `authorization_details` can be enriched to + // apply additional restrictions. + // + // Example: Adding the following `authorization_details` object to downscope + // the viewer permission to a specific table ``` { type: + // "unity_catalog_privileges", privileges: ["SELECT"], object_type: "TABLE", + // object_full_path: "main.default.testdata" } ``` + GetPublishedDashboardTokenInfo(ctx context.Context, request GetPublishedDashboardTokenInfoRequest) (*GetPublishedDashboardTokenInfoResponse, error) } // Query execution APIs for AI / BI Dashboards diff --git a/service/dashboards/model.go b/service/dashboards/model.go index 3482b74a0..b9b9b168a 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -9,6 +9,48 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" ) +type AuthorizationDetails struct { + // Represents downscoped permission rules with specific access rights. This + // field is specific to `workspace_rule_set` constraint. + GrantRules []AuthorizationDetailsGrantRule `json:"grant_rules,omitempty"` + // The acl path of the tree store resource. + ResourceLegacyAclPath string `json:"resource_legacy_acl_path,omitempty"` + // The resource name to which the authorization rule applies.
This field is + // specific to `workspace_rule_set` constraint. Format: + // `workspaces/{workspace_id}/dashboards/{dashboard_id}` + ResourceName string `json:"resource_name,omitempty"` + // The type of authorization downscoping policy. Ex: `workspace_rule_set` + // defines access rules for a specific workspace resource + Type string `json:"type,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AuthorizationDetails) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AuthorizationDetails) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AuthorizationDetailsGrantRule struct { + // Permission sets for dashboard are defined in + // iam-common/rbac-common/permission-sets/definitions/TreeStoreBasePermissionSets + // Ex: `permissionSets/dashboard.runner` + PermissionSet string `json:"permission_set,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AuthorizationDetailsGrantRule) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AuthorizationDetailsGrantRule) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Cancel the results for the a query for a published, embedded dashboard type CancelPublishedQueryExecutionRequest struct { DashboardName string `json:"-" url:"dashboard_name"` @@ -317,13 +359,9 @@ type GenieGenerateDownloadFullQueryResultRequest struct { } type GenieGenerateDownloadFullQueryResultResponse struct { - // Error message if Genie failed to download the result - Error string `json:"error,omitempty"` - // Download result status - Status MessageStatus `json:"status,omitempty"` - // Transient Statement ID. Use this ID to track the download request in - // subsequent polling calls - TransientStatementId string `json:"transient_statement_id,omitempty"` + // Download ID. 
Use this ID to track the download request in subsequent + // polling calls + DownloadId string `json:"download_id,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -348,6 +386,27 @@ type GenieGetConversationMessageRequest struct { SpaceId string `json:"-" url:"-"` } +// Get download full query result +type GenieGetDownloadFullQueryResultRequest struct { + // Attachment ID + AttachmentId string `json:"-" url:"-"` + // Conversation ID + ConversationId string `json:"-" url:"-"` + // Download ID. This ID is provided by the [Generate Download + // endpoint](:method:genie/generateDownloadFullQueryResult) + DownloadId string `json:"-" url:"-"` + // Message ID + MessageId string `json:"-" url:"-"` + // Space ID + SpaceId string `json:"-" url:"-"` +} + +type GenieGetDownloadFullQueryResultResponse struct { + // SQL Statement Execution response. See [Get status, manifest, and result + // first chunk](:method:statementexecution/getstatement) for more details. + StatementResponse *sql.StatementResponse `json:"statement_response,omitempty"` +} + // Get message attachment SQL query result type GenieGetMessageAttachmentQueryResultRequest struct { // Attachment ID @@ -553,6 +612,49 @@ type GetPublishedDashboardRequest struct { DashboardId string `json:"-" url:"-"` } +// Read an information of a published dashboard to mint an OAuth token. +type GetPublishedDashboardTokenInfoRequest struct { + // UUID identifying the published dashboard. + DashboardId string `json:"-" url:"-"` + // Provided external value to be included in the custom claim. + ExternalValue string `json:"-" url:"external_value,omitempty"` + // Provided external viewer id to be included in the custom claim. 
+ ExternalViewerId string `json:"-" url:"external_viewer_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GetPublishedDashboardTokenInfoRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPublishedDashboardTokenInfoRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type GetPublishedDashboardTokenInfoResponse struct { + // Authorization constraints for accessing the published dashboard. + // Currently includes `workspace_rule_set` and could be enriched with + // `unity_catalog_privileges` before oAuth token generation. + AuthorizationDetails []AuthorizationDetails `json:"authorization_details,omitempty"` + // Custom claim generated from external_value and external_viewer_id. + // Format: + // `urn:aibi:external_data:::` + CustomClaim string `json:"custom_claim,omitempty"` + // Scope defining access permissions. + Scope string `json:"scope,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *GetPublishedDashboardTokenInfoResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s GetPublishedDashboardTokenInfoResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get dashboard schedule type GetScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. diff --git a/service/jobs/api.go b/service/jobs/api.go index bbb59f854..1eb5da1c1 100755 --- a/service/jobs/api.go +++ b/service/jobs/api.go @@ -82,22 +82,28 @@ type JobsInterface interface { // // Retrieves the details for a single job. // - // In Jobs API 2.2, requests for a single job support pagination of `tasks` and - // `job_clusters` when either exceeds 100 elements. Use the `next_page_token` - // field to check for more results and pass its value as the `page_token` in - // subsequent requests. Arrays with fewer than 100 elements in a page will be - // empty on later pages. 
+ // Large arrays in the results will be paginated when they exceed 100 elements. + // A request for a single job will return all properties for that job, and the + // first 100 elements of array properties (`tasks`, `job_clusters`, + // `environments` and `parameters`). Use the `next_page_token` field to check + // for more results and pass its value as the `page_token` in subsequent + // requests. If any array properties have more than 100 elements, additional + // results will be returned on subsequent requests. Arrays without additional + // results will be empty on later pages. Get(ctx context.Context, request GetJobRequest) (*Job, error) // Get a single job. // // Retrieves the details for a single job. // - // In Jobs API 2.2, requests for a single job support pagination of `tasks` and - // `job_clusters` when either exceeds 100 elements. Use the `next_page_token` - // field to check for more results and pass its value as the `page_token` in - // subsequent requests. Arrays with fewer than 100 elements in a page will be - // empty on later pages. + // Large arrays in the results will be paginated when they exceed 100 elements. + // A request for a single job will return all properties for that job, and the + // first 100 elements of array properties (`tasks`, `job_clusters`, + // `environments` and `parameters`). Use the `next_page_token` field to check + // for more results and pass its value as the `page_token` in subsequent + // requests. If any array properties have more than 100 elements, additional + // results will be returned on subsequent requests. Arrays without additional + // results will be empty on later pages. GetByJobId(ctx context.Context, jobId int64) (*Job, error) // Get job permission levels. @@ -126,11 +132,14 @@ type JobsInterface interface { // // Retrieves the metadata of a run. // - // In Jobs API 2.2, requests for a single job run support pagination of `tasks` - // and `job_clusters` when either exceeds 100 elements. 
Use the - // `next_page_token` field to check for more results and pass its value as the - // `page_token` in subsequent requests. Arrays with fewer than 100 elements in a - // page will be empty on later pages. + // Large arrays in the results will be paginated when they exceed 100 elements. + // A request for a single run will return all properties for that run, and the + // first 100 elements of array properties (`tasks`, `job_clusters`, + // `job_parameters` and `repair_history`). Use the next_page_token field to + // check for more results and pass its value as the page_token in subsequent + // requests. If any array properties have more than 100 elements, additional + // results will be returned on subsequent requests. Arrays without additional + // results will be empty on later pages. GetRun(ctx context.Context, request GetRunRequest) (*Run, error) // Get the output for a single run. @@ -449,11 +458,14 @@ func (a *JobsAPI) DeleteRunByRunId(ctx context.Context, runId int64) error { // // Retrieves the details for a single job. // -// In Jobs API 2.2, requests for a single job support pagination of `tasks` and -// `job_clusters` when either exceeds 100 elements. Use the `next_page_token` -// field to check for more results and pass its value as the `page_token` in -// subsequent requests. Arrays with fewer than 100 elements in a page will be -// empty on later pages. +// Large arrays in the results will be paginated when they exceed 100 elements. +// A request for a single job will return all properties for that job, and the +// first 100 elements of array properties (`tasks`, `job_clusters`, +// `environments` and `parameters`). Use the `next_page_token` field to check +// for more results and pass its value as the `page_token` in subsequent +// requests. If any array properties have more than 100 elements, additional +// results will be returned on subsequent requests. Arrays without additional +// results will be empty on later pages. 
func (a *JobsAPI) GetByJobId(ctx context.Context, jobId int64) (*Job, error) { return a.jobsImpl.Get(ctx, GetJobRequest{ JobId: jobId, diff --git a/service/jobs/interface.go b/service/jobs/interface.go index be9b5518d..a012a92b7 100755 --- a/service/jobs/interface.go +++ b/service/jobs/interface.go @@ -62,11 +62,14 @@ type JobsService interface { // // Retrieves the details for a single job. // - // In Jobs API 2.2, requests for a single job support pagination of `tasks` - // and `job_clusters` when either exceeds 100 elements. Use the + // Large arrays in the results will be paginated when they exceed 100 + // elements. A request for a single job will return all properties for that + // job, and the first 100 elements of array properties (`tasks`, + // `job_clusters`, `environments` and `parameters`). Use the // `next_page_token` field to check for more results and pass its value as - // the `page_token` in subsequent requests. Arrays with fewer than 100 - // elements in a page will be empty on later pages. + // the `page_token` in subsequent requests. If any array properties have + // more than 100 elements, additional results will be returned on subsequent + // requests. Arrays without additional results will be empty on later pages. Get(ctx context.Context, request GetJobRequest) (*Job, error) // Get job permission levels. @@ -84,11 +87,14 @@ type JobsService interface { // // Retrieves the metadata of a run. // - // In Jobs API 2.2, requests for a single job run support pagination of - // `tasks` and `job_clusters` when either exceeds 100 elements. Use the - // `next_page_token` field to check for more results and pass its value as - // the `page_token` in subsequent requests. Arrays with fewer than 100 - // elements in a page will be empty on later pages. + // Large arrays in the results will be paginated when they exceed 100 + // elements. 
A request for a single run will return all properties for that + // run, and the first 100 elements of array properties (`tasks`, + // `job_clusters`, `job_parameters` and `repair_history`). Use the + // next_page_token field to check for more results and pass its value as the + // page_token in subsequent requests. If any array properties have more than + // 100 elements, additional results will be returned on subsequent requests. + // Arrays without additional results will be empty on later pages. GetRun(ctx context.Context, request GetRunRequest) (*Run, error) // Get the output for a single run. diff --git a/service/jobs/model.go b/service/jobs/model.go index 4272ce10f..eb7628933 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -9,6 +9,33 @@ import ( "github.com/databricks/databricks-sdk-go/service/compute" ) +type AuthenticationMethod string + +const AuthenticationMethodOauth AuthenticationMethod = `OAUTH` + +const AuthenticationMethodPat AuthenticationMethod = `PAT` + +// String representation for [fmt.Print] +func (f *AuthenticationMethod) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AuthenticationMethod) Set(v string) error { + switch v { + case `OAUTH`, `PAT`: + *f = AuthenticationMethod(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "OAUTH", "PAT"`, v) + } +} + +// Type always returns AuthenticationMethod to satisfy [pflag.Value] interface +func (f *AuthenticationMethod) Type() string { + return "AuthenticationMethod" +} + type BaseJob struct { // The time at which this job was created in epoch milliseconds // (milliseconds since 1/1/1970 UTC). @@ -23,7 +50,7 @@ type BaseJob struct { // based on accessible budget policies of the run_as identity on job // creation or modification. 
EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` - // Indicates if the job has more sub-resources (`tasks`, `job_clusters`) + // Indicates if the job has more array properties (`tasks`, `job_clusters`) // that are not shown. They can be accessed via :method:jobs/get endpoint. // It is only relevant for API 2.2 :method:jobs/list requests with // `expand_tasks=true`. @@ -72,10 +99,14 @@ type BaseRun struct { CreatorUserName string `json:"creator_user_name,omitempty"` // Description of the run Description string `json:"description,omitempty"` - // effective_performance_target is the actual performance target used by the - // run during execution. effective_performance_target can differ from the - // client-set performance_target depending on if the job was eligible to be - // cost-optimized. + // The actual performance target used by the serverless run during + // execution. This can differ from the client-set performance target on the + // request depending on whether the performance mode is supported by the job + // type. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. @@ -99,7 +130,7 @@ type BaseRun struct { // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` - // Indicates if the run has more sub-resources (`tasks`, `job_clusters`) + // Indicates if the run has more array properties (`tasks`, `job_clusters`) // that are not shown. 
They can be accessed via :method:jobs/getrun // endpoint. It is only relevant for API 2.2 :method:jobs/listruns requests // with `expand_tasks=true`. @@ -427,7 +458,6 @@ func (s ClusterSpec) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Next field: 4 type ComputeConfig struct { // IDof the GPU pool to use. GpuNodePoolId string `json:"gpu_node_pool_id"` @@ -596,9 +626,7 @@ type CreateJob struct { Health *JobsHealthRules `json:"health,omitempty"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. - // You must declare dependent libraries in task settings. If more than 100 - // job clusters are available, you can paginate through them using - // :method:jobs/get. + // You must declare dependent libraries in task settings. JobClusters []JobCluster `json:"job_clusters,omitempty"` // An optional maximum allowed number of concurrent runs of the job. Set // this value if you want to be able to execute multiple runs of the same @@ -621,8 +649,13 @@ type CreateJob struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` - // PerformanceTarget defines how performant or cost efficient the execution - // of run on serverless should be. + // The performance mode on a serverless job. The performance target + // determines the level of compute performance or cost-efficiency for the + // run. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // The queue settings of the job. 
Queue *QueueSettings `json:"queue,omitempty"` @@ -641,10 +674,13 @@ type CreateJob struct { // limitations as cluster tags. A maximum of 25 tags can be added to the // job. Tags map[string]string `json:"tags,omitempty"` - // A list of task specifications to be executed by this job. If more than - // 100 tasks are available, you can paginate through them using - // :method:jobs/get. Use the `next_page_token` field at the object root to - // determine if more results are available. + // A list of task specifications to be executed by this job. It supports up + // to 1000 elements in write endpoints (:method:jobs/create, + // :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read + // endpoints return only 100 tasks. If more than 100 tasks are available, + // you can paginate through them using :method:jobs/get. Use the + // `next_page_token` field at the object root to determine if more results + // are available. Tasks []Task `json:"tasks,omitempty"` // An optional timeout applied to each run of this job. A value of `0` means // no timeout. @@ -699,6 +735,46 @@ type CronSchedule struct { TimezoneId string `json:"timezone_id"` } +type DashboardPageSnapshot struct { + PageDisplayName string `json:"page_display_name,omitempty"` + + WidgetErrorDetails []WidgetErrorDetail `json:"widget_error_details,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DashboardPageSnapshot) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DashboardPageSnapshot) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Configures the Lakeview Dashboard job task type. 
+type DashboardTask struct { + DashboardId string `json:"dashboard_id,omitempty"` + + Subscription *Subscription `json:"subscription,omitempty"` + // The warehouse id to execute the dashboard with for the schedule + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *DashboardTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s DashboardTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type DashboardTaskOutput struct { + // Should only be populated for manual PDF download jobs. + PageSnapshots []DashboardPageSnapshot `json:"page_snapshots,omitempty"` +} + type DbtOutput struct { // An optional map of headers to send when retrieving the artifact from the // `artifacts_link`. @@ -1000,11 +1076,10 @@ func (f *Format) Type() string { return "Format" } -// Next field: 9 type GenAiComputeTask struct { // Command launcher to run the actual script, e.g. bash, python etc. Command string `json:"command,omitempty"` - // Next field: 4 + Compute *ComputeConfig `json:"compute,omitempty"` // Runtime image DlRuntimeImage string `json:"dl_runtime_image"` @@ -1067,8 +1142,8 @@ type GetJobRequest struct { // The canonical identifier of the job to retrieve information about. This // field is required. JobId int64 `json:"-" url:"job_id"` - // Use `next_page_token` returned from the previous GetJob to request the - // next page of the job's sub-resources. + // Use `next_page_token` returned from the previous GetJob response to + // request the next page of the job's array properties. PageToken string `json:"-" url:"page_token,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1124,8 +1199,8 @@ type GetRunRequest struct { IncludeHistory bool `json:"-" url:"include_history,omitempty"` // Whether to include resolved parameter values in the response. 
IncludeResolvedValues bool `json:"-" url:"include_resolved_values,omitempty"` - // Use `next_page_token` returned from the previous GetRun to request the - // next page of the run's sub-resources. + // Use `next_page_token` returned from the previous GetRun response to + // request the next page of the run's array properties. PageToken string `json:"-" url:"page_token,omitempty"` // The canonical identifier of the run for which to retrieve the metadata. // This field is required. @@ -1258,14 +1333,14 @@ type Job struct { // based on accessible budget policies of the run_as identity on job // creation or modification. EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` - // Indicates if the job has more sub-resources (`tasks`, `job_clusters`) + // Indicates if the job has more array properties (`tasks`, `job_clusters`) // that are not shown. They can be accessed via :method:jobs/get endpoint. // It is only relevant for API 2.2 :method:jobs/list requests with // `expand_tasks=true`. HasMore bool `json:"has_more,omitempty"` // The canonical identifier for this job. JobId int64 `json:"job_id,omitempty"` - // A token that can be used to list the next page of sub-resources. + // A token that can be used to list the next page of array properties. NextPageToken string `json:"next_page_token,omitempty"` // The email of an active workspace user or the application ID of a service // principal that the job runs as. This value can be changed by setting the @@ -1709,9 +1784,7 @@ type JobSettings struct { Health *JobsHealthRules `json:"health,omitempty"` // A list of job cluster specifications that can be shared and reused by // tasks of this job. Libraries cannot be declared in a shared job cluster. - // You must declare dependent libraries in task settings. If more than 100 - // job clusters are available, you can paginate through them using - // :method:jobs/get. + // You must declare dependent libraries in task settings. 
JobClusters []JobCluster `json:"job_clusters,omitempty"` // An optional maximum allowed number of concurrent runs of the job. Set // this value if you want to be able to execute multiple runs of the same @@ -1734,8 +1807,13 @@ type JobSettings struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` - // PerformanceTarget defines how performant or cost efficient the execution - // of run on serverless should be. + // The performance mode on a serverless job. The performance target + // determines the level of compute performance or cost-efficiency for the + // run. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // The queue settings of the job. Queue *QueueSettings `json:"queue,omitempty"` @@ -1754,10 +1832,13 @@ type JobSettings struct { // limitations as cluster tags. A maximum of 25 tags can be added to the // job. Tags map[string]string `json:"tags,omitempty"` - // A list of task specifications to be executed by this job. If more than - // 100 tasks are available, you can paginate through them using - // :method:jobs/get. Use the `next_page_token` field at the object root to - // determine if more results are available. + // A list of task specifications to be executed by this job. It supports up + // to 1000 elements in write endpoints (:method:jobs/create, + // :method:jobs/reset, :method:jobs/update, :method:jobs/submit). Read + // endpoints return only 100 tasks. If more than 100 tasks are available, + // you can paginate through them using :method:jobs/get. Use the + // `next_page_token` field at the object root to determine if more results + // are available. 
Tasks []Task `json:"tasks,omitempty"` // An optional timeout applied to each run of this job. A value of `0` means // no timeout. @@ -1996,9 +2077,9 @@ func (s ListJobComplianceRequest) MarshalJSON() ([]byte, error) { // List jobs type ListJobsRequest struct { - // Whether to include task and cluster details in the response. Note that in - // API 2.2, only the first 100 elements will be shown. Use :method:jobs/get - // to paginate through all tasks and clusters. + // Whether to include task and cluster details in the response. Note that + // only the first 100 elements will be shown. Use :method:jobs/get to + // paginate through all tasks and clusters. ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"` // The number of jobs to return. This value must be greater than 0 and less // or equal to 100. The default value is 20. @@ -2060,9 +2141,9 @@ type ListRunsRequest struct { // results; otherwise, lists both active and completed runs. This field // cannot be `true` when active_only is `true`. CompletedOnly bool `json:"-" url:"completed_only,omitempty"` - // Whether to include task and cluster details in the response. Note that in - // API 2.2, only the first 100 elements will be shown. Use - // :method:jobs/getrun to paginate through all tasks and clusters. + // Whether to include task and cluster details in the response. Note that + // only the first 100 elements will be shown. Use :method:jobs/getrun to + // paginate through all tasks and clusters. ExpandTasks bool `json:"-" url:"expand_tasks,omitempty"` // The job for which to list runs. If omitted, the Jobs service lists runs // from all jobs. @@ -2253,12 +2334,10 @@ func (f *PauseStatus) Type() string { // Cluster Manager (see cluster-common PerformanceTarget). 
type PerformanceTarget string -const PerformanceTargetBalanced PerformanceTarget = `BALANCED` - -const PerformanceTargetCostOptimized PerformanceTarget = `COST_OPTIMIZED` - const PerformanceTargetPerformanceOptimized PerformanceTarget = `PERFORMANCE_OPTIMIZED` +const PerformanceTargetStandard PerformanceTarget = `STANDARD` + // String representation for [fmt.Print] func (f *PerformanceTarget) String() string { return string(*f) @@ -2267,11 +2346,11 @@ func (f *PerformanceTarget) String() string { // Set raw string value and validate it against allowed values func (f *PerformanceTarget) Set(v string) error { switch v { - case `BALANCED`, `COST_OPTIMIZED`, `PERFORMANCE_OPTIMIZED`: + case `PERFORMANCE_OPTIMIZED`, `STANDARD`: *f = PerformanceTarget(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BALANCED", "COST_OPTIMIZED", "PERFORMANCE_OPTIMIZED"`, v) + return fmt.Errorf(`value "%s" is not one of "PERFORMANCE_OPTIMIZED", "STANDARD"`, v) } } @@ -2348,6 +2427,74 @@ func (s PipelineTask) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type PowerBiModel struct { + // How the published Power BI model authenticates to Databricks + AuthenticationMethod AuthenticationMethod `json:"authentication_method,omitempty"` + // The name of the Power BI model + ModelName string `json:"model_name,omitempty"` + // Whether to overwrite existing Power BI models + OverwriteExisting bool `json:"overwrite_existing,omitempty"` + // The default storage mode of the Power BI model + StorageMode StorageMode `json:"storage_mode,omitempty"` + // The name of the Power BI workspace of the model + WorkspaceName string `json:"workspace_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PowerBiModel) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PowerBiModel) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PowerBiTable struct { + // The catalog name in Databricks + Catalog string 
`json:"catalog,omitempty"` + // The table name in Databricks + Name string `json:"name,omitempty"` + // The schema name in Databricks + Schema string `json:"schema,omitempty"` + // The Power BI storage mode of the table + StorageMode StorageMode `json:"storage_mode,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PowerBiTable) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PowerBiTable) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type PowerBiTask struct { + // The resource name of the UC connection to authenticate from Databricks to + // Power BI + ConnectionResourceName string `json:"connection_resource_name,omitempty"` + // The semantic model to update + PowerBiModel *PowerBiModel `json:"power_bi_model,omitempty"` + // Whether the model should be refreshed after the update + RefreshAfterUpdate bool `json:"refresh_after_update,omitempty"` + // The tables to be exported to Power BI + Tables []PowerBiTable `json:"tables,omitempty"` + // The SQL warehouse ID to use as the Power BI data source + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PowerBiTask) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PowerBiTask) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type PythonWheelTask struct { // Named entry point to use, if it does not exist in the metadata of the // package it executes the function from the package directly using @@ -2727,10 +2874,14 @@ type Run struct { CreatorUserName string `json:"creator_user_name,omitempty"` // Description of the run Description string `json:"description,omitempty"` - // effective_performance_target is the actual performance target used by the - // run during execution. effective_performance_target can differ from the - // client-set performance_target depending on if the job was eligible to be - // cost-optimized. 
+ // The actual performance target used by the serverless run during + // execution. This can differ from the client-set performance target on the + // request depending on whether the performance mode is supported by the job + // type. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The time at which this run ended in epoch milliseconds (milliseconds // since 1/1/1970 UTC). This field is set to 0 if the job is still running. @@ -2754,7 +2905,7 @@ type Run struct { // Note: dbt and SQL File tasks support only version-controlled sources. If // dbt or SQL File tasks are used, `git_source` must be defined on the job. GitSource *GitSource `json:"git_source,omitempty"` - // Indicates if the run has more sub-resources (`tasks`, `job_clusters`) + // Indicates if the run has more array properties (`tasks`, `job_clusters`) // that are not shown. They can be accessed via :method:jobs/getrun // endpoint. It is only relevant for API 2.2 :method:jobs/listruns requests // with `expand_tasks=true`. @@ -2777,7 +2928,7 @@ type Run struct { // field is populated with the ID of the job run that the task run belongs // to. JobRunId int64 `json:"job_run_id,omitempty"` - // A token that can be used to list the next page of sub-resources. + // A token that can be used to list the next page of array properties. NextPageToken string `json:"next_page_token,omitempty"` // A unique identifier for this job run. This is set to the same value as // `run_id`. @@ -3245,9 +3396,14 @@ type RunNow struct { // A list of task keys to run inside of the job. If this field is not // provided, all tasks in the job will be run. 
Only []string `json:"only,omitempty"` - // PerformanceTarget defines how performant or cost efficient the execution - // of run on serverless compute should be. For RunNow, this performance - // target will override the target defined on the job-level. + // The performance mode on a serverless job. The performance target + // determines the level of compute performance or cost-efficiency for the + // run. This field overrides the performance target defined on the job + // level. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` @@ -3331,6 +3487,8 @@ func (s RunNowResponse) MarshalJSON() ([]byte, error) { type RunOutput struct { // The output of a clean rooms notebook task, if available CleanRoomsNotebookOutput *CleanRoomsNotebookTaskCleanRoomsNotebookTaskOutput `json:"clean_rooms_notebook_output,omitempty"` + // The output of a dashboard task, if available + DashboardOutput *DashboardTaskOutput `json:"dashboard_output,omitempty"` // The output of a dbt task, if available. DbtOutput *DbtOutput `json:"dbt_output,omitempty"` // An error message indicating why a task failed or why output is not @@ -3599,6 +3757,8 @@ type RunTask struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *RunConditionTask `json:"condition_task,omitempty"` + // The task runs a DashboardTask when the `dashboard_task` field is present. + DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. 
The dbt task requires both Databricks SQL and the ability to use // a serverless or a pro SQL warehouse. @@ -3610,13 +3770,16 @@ type RunTask struct { DependsOn []TaskDependency `json:"depends_on,omitempty"` // An optional description for this task. Description string `json:"description,omitempty"` - // Denotes whether or not the task was disabled by the user. Disabled tasks - // do not execute and are immediately skipped as soon as they are unblocked. + // Deprecated, field was never used in production. Disabled bool `json:"disabled,omitempty"` - // effective_performance_target is the actual performance target used by the - // run during execution. effective_performance_target can differ from the - // client-set performance_target depending on if the job was eligible to be - // cost-optimized. + // The actual performance target used by the serverless run during + // execution. This can differ from the client-set performance target on the + // request depending on whether the performance mode is supported by the job + // type. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // An optional set of email addresses notified when the task run begins or // completes. The default behavior is to not send any emails. @@ -3644,7 +3807,7 @@ type RunTask struct { // The task executes a nested task for every input provided when the // `for_each_task` field is present. ForEachTask *RunForEachTask `json:"for_each_task,omitempty"` - // Next field: 9 + GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"` // An optional specification for a remote Git repository containing the // source code used by tasks. 
Version-controlled source code is supported by @@ -3673,6 +3836,9 @@ type RunTask struct { // The task triggers a pipeline update when the `pipeline_task` field is // present. Only pipelines configured to use triggered more are supported. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` + // The task triggers a Power BI semantic model update when the + // `power_bi_task` field is present. + PowerBiTask *PowerBiTask `json:"power_bi_task,omitempty"` // The task runs a Python wheel when the `python_wheel_task` field is // present. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` @@ -4210,6 +4376,35 @@ func (s SqlTaskSubscription) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type StorageMode string + +const StorageModeDirectQuery StorageMode = `DIRECT_QUERY` + +const StorageModeDual StorageMode = `DUAL` + +const StorageModeImport StorageMode = `IMPORT` + +// String representation for [fmt.Print] +func (f *StorageMode) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *StorageMode) Set(v string) error { + switch v { + case `DIRECT_QUERY`, `DUAL`, `IMPORT`: + *f = StorageMode(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "DIRECT_QUERY", "DUAL", "IMPORT"`, v) + } +} + +// Type always returns StorageMode to satisfy [pflag.Value] interface +func (f *StorageMode) Type() string { + return "StorageMode" +} + type SubmitRun struct { // List of permissions to set on the job. AccessControlList []JobAccessControlRequest `json:"access_control_list,omitempty"` @@ -4309,6 +4504,8 @@ type SubmitTask struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` + // The task runs a DashboardTask when the `dashboard_task` field is present. 
+ DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use // a serverless or a pro SQL warehouse. @@ -4335,7 +4532,7 @@ type SubmitTask struct { // The task executes a nested task for every input provided when the // `for_each_task` field is present. ForEachTask *ForEachTask `json:"for_each_task,omitempty"` - // Next field: 9 + GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` @@ -4354,6 +4551,9 @@ type SubmitTask struct { // The task triggers a pipeline update when the `pipeline_task` field is // present. Only pipelines configured to use triggered more are supported. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` + // The task triggers a Power BI semantic model update when the + // `power_bi_task` field is present. + PowerBiTask *PowerBiTask `json:"power_bi_task,omitempty"` // The task runs a Python wheel when the `python_wheel_task` field is // present. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` @@ -4415,6 +4615,42 @@ func (s SubmitTask) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type Subscription struct { + // Optional: Allows users to specify a custom subject line on the email sent + // to subscribers. + CustomSubject string `json:"custom_subject,omitempty"` + // When true, the subscription will not send emails. 
+ Paused bool `json:"paused,omitempty"` + + Subscribers []SubscriptionSubscriber `json:"subscribers,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *Subscription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s Subscription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type SubscriptionSubscriber struct { + DestinationId string `json:"destination_id,omitempty"` + + UserName string `json:"user_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *SubscriptionSubscriber) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SubscriptionSubscriber) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type TableUpdateTriggerConfiguration struct { // The table(s) condition based on which to trigger a job run. Condition Condition `json:"condition,omitempty"` @@ -4453,6 +4689,8 @@ type Task struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` + // The task runs a DashboardTask when the `dashboard_task` field is present. + DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use // a serverless or a pro SQL warehouse. @@ -4482,7 +4720,7 @@ type Task struct { // The task executes a nested task for every input provided when the // `for_each_task` field is present. ForEachTask *ForEachTask `json:"for_each_task,omitempty"` - // Next field: 9 + GenAiComputeTask *GenAiComputeTask `json:"gen_ai_compute_task,omitempty"` // An optional set of health rules that can be defined for this job. Health *JobsHealthRules `json:"health,omitempty"` @@ -4513,6 +4751,9 @@ type Task struct { // The task triggers a pipeline update when the `pipeline_task` field is // present. 
Only pipelines configured to use triggered more are supported. PipelineTask *PipelineTask `json:"pipeline_task,omitempty"` + // The task triggers a Power BI semantic model update when the + // `power_bi_task` field is present. + PowerBiTask *PowerBiTask `json:"power_bi_task,omitempty"` // The task runs a Python wheel when the `python_wheel_task` field is // present. PythonWheelTask *PythonWheelTask `json:"python_wheel_task,omitempty"` @@ -5168,3 +5409,17 @@ type WebhookNotifications struct { // the `on_success` property. OnSuccess []Webhook `json:"on_success,omitempty"` } + +type WidgetErrorDetail struct { + Message string `json:"message,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *WidgetErrorDetail) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s WidgetErrorDetail) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} diff --git a/service/ml/api.go b/service/ml/api.go index 99ecc63d5..25fd7909b 100755 --- a/service/ml/api.go +++ b/service/ml/api.go @@ -72,6 +72,18 @@ type ExperimentsInterface interface { // exists. GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error) + // Get credentials to download trace data. + GetCredentialsForTraceDataDownload(ctx context.Context, request GetCredentialsForTraceDataDownloadRequest) (*GetCredentialsForTraceDataDownloadResponse, error) + + // Get credentials to download trace data. + GetCredentialsForTraceDataDownloadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataDownloadResponse, error) + + // Get credentials to upload trace data. + GetCredentialsForTraceDataUpload(ctx context.Context, request GetCredentialsForTraceDataUploadRequest) (*GetCredentialsForTraceDataUploadResponse, error) + + // Get credentials to upload trace data. + GetCredentialsForTraceDataUploadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataUploadResponse, error) + // Get an experiment. 
// // Gets metadata for an experiment. This method works on deleted experiments. @@ -352,6 +364,20 @@ type ExperimentsAPI struct { experimentsImpl } +// Get credentials to download trace data. +func (a *ExperimentsAPI) GetCredentialsForTraceDataDownloadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataDownloadResponse, error) { + return a.experimentsImpl.GetCredentialsForTraceDataDownload(ctx, GetCredentialsForTraceDataDownloadRequest{ + RequestId: requestId, + }) +} + +// Get credentials to upload trace data. +func (a *ExperimentsAPI) GetCredentialsForTraceDataUploadByRequestId(ctx context.Context, requestId string) (*GetCredentialsForTraceDataUploadResponse, error) { + return a.experimentsImpl.GetCredentialsForTraceDataUpload(ctx, GetCredentialsForTraceDataUploadRequest{ + RequestId: requestId, + }) +} + // Get experiment permission levels. // // Gets the permission levels that a user can have on an object. diff --git a/service/ml/impl.go b/service/ml/impl.go index cb27bdc46..ce1d4c630 100755 --- a/service/ml/impl.go +++ b/service/ml/impl.go @@ -93,6 +93,26 @@ func (a *experimentsImpl) GetByName(ctx context.Context, request GetByNameReques return &getExperimentByNameResponse, err } +func (a *experimentsImpl) GetCredentialsForTraceDataDownload(ctx context.Context, request GetCredentialsForTraceDataDownloadRequest) (*GetCredentialsForTraceDataDownloadResponse, error) { + var getCredentialsForTraceDataDownloadResponse GetCredentialsForTraceDataDownloadResponse + path := fmt.Sprintf("/api/2.0/mlflow/traces/%v/credentials-for-data-download", request.RequestId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCredentialsForTraceDataDownloadResponse) + return &getCredentialsForTraceDataDownloadResponse, err +} + +func (a *experimentsImpl) GetCredentialsForTraceDataUpload(ctx context.Context, 
request GetCredentialsForTraceDataUploadRequest) (*GetCredentialsForTraceDataUploadResponse, error) { + var getCredentialsForTraceDataUploadResponse GetCredentialsForTraceDataUploadResponse + path := fmt.Sprintf("/api/2.0/mlflow/traces/%v/credentials-for-data-upload", request.RequestId) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &getCredentialsForTraceDataUploadResponse) + return &getCredentialsForTraceDataUploadResponse, err +} + func (a *experimentsImpl) GetExperiment(ctx context.Context, request GetExperimentRequest) (*GetExperimentResponse, error) { var getExperimentResponse GetExperimentResponse path := "/api/2.0/mlflow/experiments/get" diff --git a/service/ml/interface.go b/service/ml/interface.go index 2d90493d2..928e99553 100755 --- a/service/ml/interface.go +++ b/service/ml/interface.go @@ -75,6 +75,12 @@ type ExperimentsService interface { // exists. GetByName(ctx context.Context, request GetByNameRequest) (*GetExperimentByNameResponse, error) + // Get credentials to download trace data. + GetCredentialsForTraceDataDownload(ctx context.Context, request GetCredentialsForTraceDataDownloadRequest) (*GetCredentialsForTraceDataDownloadResponse, error) + + // Get credentials to upload trace data. + GetCredentialsForTraceDataUpload(ctx context.Context, request GetCredentialsForTraceDataUploadRequest) (*GetCredentialsForTraceDataUploadResponse, error) + // Get an experiment. // // Gets metadata for an experiment. 
This method works on deleted diff --git a/service/ml/model.go b/service/ml/model.go index 5573c0279..b0e0c0491 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -207,6 +207,82 @@ type ApproveTransitionRequestResponse struct { Activity *Activity `json:"activity,omitempty"` } +type ArtifactCredentialInfo struct { + // A collection of HTTP headers that should be specified when uploading to + // or downloading from the specified `signed_uri`. + Headers []ArtifactCredentialInfoHttpHeader `json:"headers,omitempty"` + // The path, relative to the Run's artifact root location, of the artifact + // that can be accessed with the credential. + Path string `json:"path,omitempty"` + // The ID of the MLflow Run containing the artifact that can be accessed + // with the credential. + RunId string `json:"run_id,omitempty"` + // The signed URI credential that provides access to the artifact. + SignedUri string `json:"signed_uri,omitempty"` + // The type of the signed credential URI (e.g., an AWS presigned URL or an + // Azure Shared Access Signature URI). + Type ArtifactCredentialType `json:"type,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ArtifactCredentialInfo) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ArtifactCredentialInfo) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ArtifactCredentialInfoHttpHeader struct { + // The HTTP header name. + Name string `json:"name,omitempty"` + // The HTTP header value. 
+ Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ArtifactCredentialInfoHttpHeader) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ArtifactCredentialInfoHttpHeader) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// The type of a given artifact access credential +type ArtifactCredentialType string + +const ArtifactCredentialTypeAwsPresignedUrl ArtifactCredentialType = `AWS_PRESIGNED_URL` + +const ArtifactCredentialTypeAzureAdlsGen2SasUri ArtifactCredentialType = `AZURE_ADLS_GEN2_SAS_URI` + +const ArtifactCredentialTypeAzureSasUri ArtifactCredentialType = `AZURE_SAS_URI` + +const ArtifactCredentialTypeGcpSignedUrl ArtifactCredentialType = `GCP_SIGNED_URL` + +// String representation for [fmt.Print] +func (f *ArtifactCredentialType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ArtifactCredentialType) Set(v string) error { + switch v { + case `AWS_PRESIGNED_URL`, `AZURE_ADLS_GEN2_SAS_URI`, `AZURE_SAS_URI`, `GCP_SIGNED_URL`: + *f = ArtifactCredentialType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AWS_PRESIGNED_URL", "AZURE_ADLS_GEN2_SAS_URI", "AZURE_SAS_URI", "GCP_SIGNED_URL"`, v) + } +} + +// Type always returns ArtifactCredentialType to satisfy [pflag.Value] interface +func (f *ArtifactCredentialType) Type() string { + return "ArtifactCredentialType" +} + // An action that a user (with sufficient permissions) could take on a comment. // Valid values are: * `EDIT_COMMENT`: Edit the comment // @@ -320,56 +396,60 @@ func (s CreateExperimentResponse) MarshalJSON() ([]byte, error) { } type CreateForecastingExperimentRequest struct { - // Name of the column in the input training table used to customize the - // weight for each time series to calculate weighted metrics. + // The column in the training table used to customize weights for each time + // series. 
CustomWeightsColumn string `json:"custom_weights_column,omitempty"` - // The path to the created experiment. This is the path where the experiment - // will be stored in the workspace. + // The path in the workspace to store the created experiment. ExperimentPath string `json:"experiment_path,omitempty"` - // The granularity of the forecast. This defines the time interval between - // consecutive rows in the time series data. Possible values: '1 second', '1 - // minute', '5 minutes', '10 minutes', '15 minutes', '30 minutes', 'Hourly', - // 'Daily', 'Weekly', 'Monthly', 'Quarterly', 'Yearly'. + // The time interval between consecutive rows in the time series data. + // Possible values include: '1 second', '1 minute', '5 minutes', '10 + // minutes', '15 minutes', '30 minutes', 'Hourly', 'Daily', 'Weekly', + // 'Monthly', 'Quarterly', 'Yearly'. ForecastGranularity string `json:"forecast_granularity"` - // The number of time steps into the future for which predictions should be - // made. This value represents a multiple of forecast_granularity - // determining how far ahead the model will forecast. + // The number of time steps into the future to make predictions, calculated + // as a multiple of forecast_granularity. This value represents how far + // ahead the model should forecast. ForecastHorizon int64 `json:"forecast_horizon"` - // Region code(s) to consider when automatically adding holiday features. - // When empty, no holiday features are added. Only supports 1 holiday region - // for now. + // The region code(s) to automatically add holiday features. Currently + // supports only one region. HolidayRegions []string `json:"holiday_regions,omitempty"` - // The maximum duration in minutes for which the experiment is allowed to - // run. If the experiment exceeds this time limit it will be stopped - // automatically. + // Specifies the list of feature columns to include in model training. 
These + // columns must exist in the training data and be of type string, numerical, + // or boolean. If not specified, no additional features will be included. + // Note: Certain columns are automatically handled: - Automatically + // excluded: split_column, target_column, custom_weights_column. - + // Automatically included: time_column. + IncludeFeatures []string `json:"include_features,omitempty"` + // The maximum duration for the experiment in minutes. The experiment stops + // automatically if it exceeds this limit. MaxRuntime int64 `json:"max_runtime,omitempty"` - // The three-level (fully qualified) path to a unity catalog table. This - // table path serves to store the predictions. + // The fully qualified path of a Unity Catalog table, formatted as + // catalog_name.schema_name.table_name, used to store predictions. PredictionDataPath string `json:"prediction_data_path,omitempty"` // The evaluation metric used to optimize the forecasting model. PrimaryMetric string `json:"primary_metric,omitempty"` - // The three-level (fully qualified) path to a unity catalog model. This - // model path serves to store the best model. + // The fully qualified path of a Unity Catalog model, formatted as + // catalog_name.schema_name.model_name, used to store the best model. RegisterTo string `json:"register_to,omitempty"` - // Name of the column in the input training table used for custom data - // splits. The values in this column must be "train", "validate", or "test" - // to indicate which split each row belongs to. + // The column in the training table used for custom data splits. Values + // must be 'train', 'validate', or 'test'. SplitColumn string `json:"split_column,omitempty"` - // Name of the column in the input training table that serves as the - // prediction target. The values in this column will be used as the ground - // truth for model training. + // The column in the input training table used as the prediction target for + // model training. 
The values in this column are used as the ground truth + // for model training. TargetColumn string `json:"target_column"` - // Name of the column in the input training table that represents the - // timestamp of each row. + // The column in the input training table that represents each row's + // timestamp. TimeColumn string `json:"time_column"` - // Name of the column in the input training table used to group the dataset - // to predict individual time series + // The column in the training table used to group the dataset for predicting + // individual time series. TimeseriesIdentifierColumns []string `json:"timeseries_identifier_columns,omitempty"` - // The three-level (fully qualified) name of a unity catalog table. This - // table serves as the training data for the forecasting model. + // The fully qualified name of a Unity Catalog table, formatted as + // catalog_name.schema_name.table_name, used as training data for the + // forecasting model. TrainDataPath string `json:"train_data_path"` - // The list of frameworks to include for model tuning. Possible values: - // 'Prophet', 'ARIMA', 'DeepAR'. An empty list will include all supported + // List of frameworks to include for model tuning. Possible values are + // 'Prophet', 'ARIMA', 'DeepAR'. An empty list includes all supported // frameworks. TrainingFrameworks []string `json:"training_frameworks,omitempty"` @@ -1015,11 +1095,11 @@ func (s ExperimentTag) MarshalJSON() ([]byte, error) { // Metadata of a single artifact file or directory. type FileInfo struct { - // Size in bytes. Unset for directories. + // The size in bytes of the file. Unset for directories. FileSize int64 `json:"file_size,omitempty"` // Whether the path is a directory. IsDir bool `json:"is_dir,omitempty"` - // Path relative to the root artifact directory run. + // The path relative to the root artifact directory run. 
Path string `json:"path,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1093,6 +1173,28 @@ type GetByNameRequest struct { ExperimentName string `json:"-" url:"experiment_name"` } +// Get credentials to download trace data +type GetCredentialsForTraceDataDownloadRequest struct { + // The ID of the trace to fetch artifact download credentials for. + RequestId string `json:"-" url:"-"` +} + +type GetCredentialsForTraceDataDownloadResponse struct { + // The artifact download credentials for the specified trace data. + CredentialInfo *ArtifactCredentialInfo `json:"credential_info,omitempty"` +} + +// Get credentials to upload trace data +type GetCredentialsForTraceDataUploadRequest struct { + // The ID of the trace to fetch artifact upload credentials for. + RequestId string `json:"-" url:"-"` +} + +type GetCredentialsForTraceDataUploadResponse struct { + // The artifact upload credentials for the specified trace data. + CredentialInfo *ArtifactCredentialInfo `json:"credential_info,omitempty"` +} + type GetExperimentByNameResponse struct { // Experiment details. Experiment *Experiment `json:"experiment,omitempty"` @@ -1387,8 +1489,8 @@ func (s JobSpecWithoutSecret) MarshalJSON() ([]byte, error) { // List artifacts type ListArtifactsRequest struct { - // Token indicating the page of artifact results to fetch. `page_token` is - // not supported when listing artifacts in UC Volumes. A maximum of 1000 + // The token indicating the page of artifact results to fetch. `page_token` + // is not supported when listing artifacts in UC Volumes. A maximum of 1000 // artifacts will be retrieved for UC Volumes. Please call // `/api/2.0/fs/directories{directory_path}` for listing artifacts in UC // Volumes, which supports pagination. See [List directory contents | Files @@ -1415,11 +1517,11 @@ func (s ListArtifactsRequest) MarshalJSON() ([]byte, error) { } type ListArtifactsResponse struct { - // File location and metadata for artifacts. 
+ // The file location and metadata for artifacts. Files []FileInfo `json:"files,omitempty"` - // Token that can be used to retrieve the next page of artifact results + // The token that can be used to retrieve the next page of artifact results. NextPageToken string `json:"next_page_token,omitempty"` - // Root artifact directory for the run. + // The root artifact directory for the run. RootUri string `json:"root_uri,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1595,6 +1697,8 @@ type LogBatchResponse struct { type LogInputs struct { // Dataset inputs Datasets []DatasetInput `json:"datasets,omitempty"` + // Model inputs + Models []ModelInput `json:"models,omitempty"` // ID of the run to log under RunId string `json:"run_id"` } @@ -1603,8 +1707,17 @@ type LogInputsResponse struct { } type LogMetric struct { + // Dataset digest of the dataset associated with the metric, e.g. an md5 + // hash of the dataset that uniquely identifies it within datasets of the + // same name. + DatasetDigest string `json:"dataset_digest,omitempty"` + // The name of the dataset associated with the metric. E.g. + // “my.uc.table@2” “nyc-taxi-dataset”, “fantastic-elk-3” + DatasetName string `json:"dataset_name,omitempty"` // Name of the metric. Key string `json:"key"` + // ID of the logged model associated with the metric, if applicable + ModelId string `json:"model_id,omitempty"` // ID of the run under which to log the metric. Must be provided. RunId string `json:"run_id,omitempty"` // [Deprecated, use `run_id` instead] ID of the run under which to log the @@ -1678,13 +1791,25 @@ type LogParamResponse struct { // Metric associated with a run, represented as a key-value pair. type Metric struct { - // Key identifying this metric. + // The dataset digest of the dataset associated with the metric, e.g. an md5 + // hash of the dataset that uniquely identifies it within datasets of the + // same name. 
+ DatasetDigest string `json:"dataset_digest,omitempty"` + // The name of the dataset associated with the metric. E.g. + // “my.uc.table@2” “nyc-taxi-dataset”, “fantastic-elk-3” + DatasetName string `json:"dataset_name,omitempty"` + // The key identifying the metric. Key string `json:"key,omitempty"` - // Step at which to log the metric. + // The ID of the logged model or registered model version associated with + // the metric, if applicable. + ModelId string `json:"model_id,omitempty"` + // The ID of the run containing the metric. + RunId string `json:"run_id,omitempty"` + // The step at which the metric was logged. Step int64 `json:"step,omitempty"` - // The timestamp at which this metric was recorded. + // The timestamp at which the metric was recorded. Timestamp int64 `json:"timestamp,omitempty"` - // Value associated with this metric. + // The value of the metric. Value float64 `json:"value,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -1759,6 +1884,12 @@ func (s ModelDatabricks) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Represents a LoggedModel or Registered Model Version input to a Run. +type ModelInput struct { + // The unique identifier of the model. + ModelId string `json:"model_id"` +} + type ModelTag struct { // The tag key. Key string `json:"key,omitempty"` @@ -2479,6 +2610,11 @@ func (f *RunInfoStatus) Type() string { type RunInputs struct { // Run metrics. DatasetInputs []DatasetInput `json:"dataset_inputs,omitempty"` + // **NOTE**: Experimental: This API field may change or be removed in a + // future release without warning. + // + // Model inputs to the Run. + ModelInputs []ModelInput `json:"model_inputs,omitempty"` } // Tag for a run. 
diff --git a/service/pipelines/api.go b/service/pipelines/api.go index be5ff8121..68651fed6 100755 --- a/service/pipelines/api.go +++ b/service/pipelines/api.go @@ -16,10 +16,6 @@ import ( type PipelinesInterface interface { - // WaitGetPipelineRunning repeatedly calls [PipelinesAPI.Get] and waits to reach RUNNING state - WaitGetPipelineRunning(ctx context.Context, pipelineId string, - timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) - // WaitGetPipelineIdle repeatedly calls [PipelinesAPI.Get] and waits to reach IDLE state WaitGetPipelineIdle(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) @@ -205,60 +201,6 @@ type PipelinesAPI struct { pipelinesImpl } -// WaitGetPipelineRunning repeatedly calls [PipelinesAPI.Get] and waits to reach RUNNING state -func (a *PipelinesAPI) WaitGetPipelineRunning(ctx context.Context, pipelineId string, - timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) { - ctx = useragent.InContext(ctx, "sdk-feature", "long-running") - return retries.Poll[GetPipelineResponse](ctx, timeout, func() (*GetPipelineResponse, *retries.Err) { - getPipelineResponse, err := a.Get(ctx, GetPipelineRequest{ - PipelineId: pipelineId, - }) - if err != nil { - return nil, retries.Halt(err) - } - if callback != nil { - callback(getPipelineResponse) - } - status := getPipelineResponse.State - statusMessage := getPipelineResponse.Cause - switch status { - case PipelineStateRunning: // target state - return getPipelineResponse, nil - case PipelineStateFailed: - err := fmt.Errorf("failed to reach %s, got %s: %s", - PipelineStateRunning, status, statusMessage) - return nil, retries.Halt(err) - default: - return nil, retries.Continues(statusMessage) - } - }) -} - -// WaitGetPipelineRunning is a wrapper that calls [PipelinesAPI.WaitGetPipelineRunning] and waits to reach RUNNING state. 
-type WaitGetPipelineRunning[R any] struct { - Response *R - PipelineId string `json:"pipeline_id"` - Poll func(time.Duration, func(*GetPipelineResponse)) (*GetPipelineResponse, error) - callback func(*GetPipelineResponse) - timeout time.Duration -} - -// OnProgress invokes a callback every time it polls for the status update. -func (w *WaitGetPipelineRunning[R]) OnProgress(callback func(*GetPipelineResponse)) *WaitGetPipelineRunning[R] { - w.callback = callback - return w -} - -// Get the GetPipelineResponse with the default timeout of 20 minutes. -func (w *WaitGetPipelineRunning[R]) Get() (*GetPipelineResponse, error) { - return w.Poll(w.timeout, w.callback) -} - -// Get the GetPipelineResponse with custom timeout. -func (w *WaitGetPipelineRunning[R]) GetWithTimeout(timeout time.Duration) (*GetPipelineResponse, error) { - return w.Poll(timeout, w.callback) -} - // WaitGetPipelineIdle repeatedly calls [PipelinesAPI.Get] and waits to reach IDLE state func (a *PipelinesAPI) WaitGetPipelineIdle(ctx context.Context, pipelineId string, timeout time.Duration, callback func(*GetPipelineResponse)) (*GetPipelineResponse, error) { diff --git a/service/pkg.go b/service/pkg.go index 052bf33ae..95dbd2fba 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -52,10 +52,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. -// // - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. // +// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. 
// // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. @@ -82,8 +82,14 @@ // // - [settings.DisableLegacyFeaturesAPI]: Disable legacy features for new Databricks workspaces. // +// - [settings.EnableExportNotebookAPI]: Controls whether users can export notebooks and files from the Workspace. +// // - [settings.EnableIpAccessListsAPI]: Controls the enforcement of IP access lists for accessing the account console. // +// - [settings.EnableNotebookTableClipboardAPI]: Controls whether users can copy tabular data to the clipboard via the UI. +// +// - [settings.EnableResultsDownloadingAPI]: Controls whether users can download notebook results. +// // - [provisioning.EncryptionKeysAPI]: These APIs manage encryption key configurations for this workspace (optional). // // - [settings.EnhancedSecurityMonitoringAPI]: Controls whether enhanced security monitoring is enabled for the current workspace. 
@@ -339,8 +345,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *catalog.CredentialsAPI = nil _ *provisioning.CredentialsAPI = nil + _ *catalog.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil @@ -354,7 +360,10 @@ var ( _ *settings.DisableLegacyAccessAPI = nil _ *settings.DisableLegacyDbfsAPI = nil _ *settings.DisableLegacyFeaturesAPI = nil + _ *settings.EnableExportNotebookAPI = nil _ *settings.EnableIpAccessListsAPI = nil + _ *settings.EnableNotebookTableClipboardAPI = nil + _ *settings.EnableResultsDownloadingAPI = nil _ *provisioning.EncryptionKeysAPI = nil _ *settings.EnhancedSecurityMonitoringAPI = nil _ *settings.EsmEnablementAccountAPI = nil diff --git a/service/serving/api.go b/service/serving/api.go index 3d5a3a133..c5e91522c 100755 --- a/service/serving/api.go +++ b/service/serving/api.go @@ -137,15 +137,14 @@ type ServingEndpointsInterface interface { // Update rate limits of a serving endpoint. // - // Used to update the rate limits of a serving endpoint. NOTE: Only foundation - // model endpoints are currently supported. For external models, use AI Gateway - // to manage rate limits. + // Deprecated: Please use AI Gateway to manage rate limits instead. Put(ctx context.Context, request PutRequest) (*PutResponse, error) // Update AI Gateway of a serving endpoint. // - // Used to update the AI Gateway of a serving endpoint. NOTE: Only external - // model and provisioned throughput endpoints are currently supported. + // Used to update the AI Gateway of a serving endpoint. NOTE: External model, + // provisioned throughput, and pay-per-token endpoints are fully supported; + // agent endpoints currently only support inference tables. PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) // Query a serving endpoint. 
diff --git a/service/serving/interface.go b/service/serving/interface.go index ae4452c3f..0cf50e923 100755 --- a/service/serving/interface.go +++ b/service/serving/interface.go @@ -83,15 +83,14 @@ type ServingEndpointsService interface { // Update rate limits of a serving endpoint. // - // Used to update the rate limits of a serving endpoint. NOTE: Only - // foundation model endpoints are currently supported. For external models, - // use AI Gateway to manage rate limits. + // Deprecated: Please use AI Gateway to manage rate limits instead. Put(ctx context.Context, request PutRequest) (*PutResponse, error) // Update AI Gateway of a serving endpoint. // - // Used to update the AI Gateway of a serving endpoint. NOTE: Only external - // model and provisioned throughput endpoints are currently supported. + // Used to update the AI Gateway of a serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. PutAiGateway(ctx context.Context, request PutAiGatewayRequest) (*PutAiGatewayResponse, error) // Query a serving endpoint. diff --git a/service/serving/model.go b/service/serving/model.go index a52550167..502c8698c 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -506,9 +506,9 @@ func (s CohereConfig) MarshalJSON() ([]byte, error) { } type CreateServingEndpoint struct { - // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model and provisioned throughput endpoints are currently - // supported. + // The AI Gateway configuration for the serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The budget policy to be applied to the serving endpoint. 
BudgetPolicyId string `json:"budget_policy_id,omitempty"` @@ -1936,9 +1936,9 @@ type ServerLogsResponse struct { } type ServingEndpoint struct { - // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model and provisioned throughput endpoints are currently - // supported. + // The AI Gateway configuration for the serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The budget policy associated with the endpoint. BudgetPolicyId string `json:"budget_policy_id,omitempty"` @@ -2018,9 +2018,9 @@ func (s ServingEndpointAccessControlResponse) MarshalJSON() ([]byte, error) { } type ServingEndpointDetailed struct { - // The AI Gateway configuration for the serving endpoint. NOTE: Only - // external model and provisioned throughput endpoints are currently - // supported. + // The AI Gateway configuration for the serving endpoint. NOTE: External + // model, provisioned throughput, and pay-per-token endpoints are fully + // supported; agent endpoints currently only support inference tables. AiGateway *AiGatewayConfig `json:"ai_gateway,omitempty"` // The budget policy associated with the endpoint. BudgetPolicyId string `json:"budget_policy_id,omitempty"` diff --git a/service/settings/api.go b/service/settings/api.go index 039558db5..efbd46945 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. 
-// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enable Ip Access Lists, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. +// These APIs allow you to manage Account Ip Access Lists, Account Settings, Aibi Dashboard Embedding Access Policy, Aibi Dashboard Embedding Approved Domains, Automatic Cluster Update, Compliance Security Profile, Credentials Manager, Csp Enablement Account, Default Namespace, Disable Legacy Access, Disable Legacy Dbfs, Disable Legacy Features, Enable Export Notebook, Enable Ip Access Lists, Enable Notebook Table Clipboard, Enable Results Downloading, Enhanced Security Monitoring, Esm Enablement Account, Ip Access Lists, Network Connectivity, Notification Destinations, Personal Compute, Restrict Workspace Admins, Settings, Token Management, Tokens, Workspace Conf, etc. package settings import ( @@ -626,11 +626,10 @@ func NewDisableLegacyAccess(client *client.DatabricksClient) *DisableLegacyAcces // 'Disabling legacy access' has the following impacts: // -// 1. Disables direct access to the Hive Metastore. However, you can still -// access Hive Metastore through HMS Federation. 2. Disables Fallback Mode (docs -// link) on any External Location access from the workspace. 3. Alters DBFS path -// access to use External Location permissions in place of legacy credentials. -// 4. Enforces Unity Catalog access on all path based access. +// 1. Disables direct access to Hive Metastores from the workspace. 
However, you +// can still access a Hive Metastore through Hive Metastore federation. 2. +// Disables fallback mode on external location access from the workspace. 3. +// Disables Databricks Runtime versions prior to 13.3LTS. type DisableLegacyAccessAPI struct { disableLegacyAccessImpl } @@ -705,6 +704,35 @@ type DisableLegacyFeaturesAPI struct { disableLegacyFeaturesImpl } +type EnableExportNotebookInterface interface { + + // Get the Enable Export Notebook setting. + // + // Gets the Enable Export Notebook setting. + GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) + + // Update the Enable Export Notebook setting. + // + // Updates the Enable Export Notebook setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. + PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) +} + +func NewEnableExportNotebook(client *client.DatabricksClient) *EnableExportNotebookAPI { + return &EnableExportNotebookAPI{ + enableExportNotebookImpl: enableExportNotebookImpl{ + client: client, + }, + } +} + +// Controls whether users can export notebooks and files from the Workspace. By +// default, this setting is enabled. +type EnableExportNotebookAPI struct { + enableExportNotebookImpl +} + type EnableIpAccessListsInterface interface { // Delete the account IP access toggle setting. @@ -738,6 +766,64 @@ type EnableIpAccessListsAPI struct { enableIpAccessListsImpl } +type EnableNotebookTableClipboardInterface interface { + + // Get the Enable Notebook Table Clipboard setting. + // + // Gets the Enable Notebook Table Clipboard setting. + GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) + + // Update the Enable Notebook Table Clipboard setting. + // + // Updates the Enable Notebook Table Clipboard setting. 
The model follows + // eventual consistency, which means the get after the update operation might + // receive stale values for some time. + PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) +} + +func NewEnableNotebookTableClipboard(client *client.DatabricksClient) *EnableNotebookTableClipboardAPI { + return &EnableNotebookTableClipboardAPI{ + enableNotebookTableClipboardImpl: enableNotebookTableClipboardImpl{ + client: client, + }, + } +} + +// Controls whether users can copy tabular data to the clipboard via the UI. By +// default, this setting is enabled. +type EnableNotebookTableClipboardAPI struct { + enableNotebookTableClipboardImpl +} + +type EnableResultsDownloadingInterface interface { + + // Get the Enable Results Downloading setting. + // + // Gets the Enable Results Downloading setting. + GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) + + // Update the Enable Results Downloading setting. + // + // Updates the Enable Results Downloading setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. + PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) +} + +func NewEnableResultsDownloading(client *client.DatabricksClient) *EnableResultsDownloadingAPI { + return &EnableResultsDownloadingAPI{ + enableResultsDownloadingImpl: enableResultsDownloadingImpl{ + client: client, + }, + } +} + +// Controls whether users can download notebook results. By default, this +// setting is enabled. +type EnableResultsDownloadingAPI struct { + enableResultsDownloadingImpl +} + type EnhancedSecurityMonitoringInterface interface { // Get the enhanced security monitoring setting. 
@@ -1403,11 +1489,10 @@ type SettingsInterface interface { // 'Disabling legacy access' has the following impacts: // - // 1. Disables direct access to the Hive Metastore. However, you can still - // access Hive Metastore through HMS Federation. 2. Disables Fallback Mode - // (docs link) on any External Location access from the workspace. 3. Alters - // DBFS path access to use External Location permissions in place of legacy - // credentials. 4. Enforces Unity Catalog access on all path based access. + // 1. Disables direct access to Hive Metastores from the workspace. However, + // you can still access a Hive Metastore through Hive Metastore federation. + // 2. Disables fallback mode on external location access from the workspace. + // 3. Disables Databricks Runtime versions prior to 13.3LTS. DisableLegacyAccess() DisableLegacyAccessInterface // When this setting is on, access to DBFS root and DBFS mounts is @@ -1415,6 +1500,18 @@ type SettingsInterface interface { // all DBFS functionality is enabled DisableLegacyDbfs() DisableLegacyDbfsInterface + // Controls whether users can export notebooks and files from the Workspace. + // By default, this setting is enabled. + EnableExportNotebook() EnableExportNotebookInterface + + // Controls whether users can copy tabular data to the clipboard via the UI. + // By default, this setting is enabled. + EnableNotebookTableClipboard() EnableNotebookTableClipboardInterface + + // Controls whether users can download notebook results. By default, this + // setting is enabled. + EnableResultsDownloading() EnableResultsDownloadingInterface + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. 
However, if the @@ -1460,6 +1557,12 @@ func NewSettings(client *client.DatabricksClient) *SettingsAPI { disableLegacyDbfs: NewDisableLegacyDbfs(client), + enableExportNotebook: NewEnableExportNotebook(client), + + enableNotebookTableClipboard: NewEnableNotebookTableClipboard(client), + + enableResultsDownloading: NewEnableResultsDownloading(client), + enhancedSecurityMonitoring: NewEnhancedSecurityMonitoring(client), restrictWorkspaceAdmins: NewRestrictWorkspaceAdmins(client), @@ -1509,11 +1612,10 @@ type SettingsAPI struct { // 'Disabling legacy access' has the following impacts: // - // 1. Disables direct access to the Hive Metastore. However, you can still - // access Hive Metastore through HMS Federation. 2. Disables Fallback Mode - // (docs link) on any External Location access from the workspace. 3. Alters - // DBFS path access to use External Location permissions in place of legacy - // credentials. 4. Enforces Unity Catalog access on all path based access. + // 1. Disables direct access to Hive Metastores from the workspace. However, + // you can still access a Hive Metastore through Hive Metastore federation. + // 2. Disables fallback mode on external location access from the workspace. + // 3. Disables Databricks Runtime versions prior to 13.3LTS. disableLegacyAccess DisableLegacyAccessInterface // When this setting is on, access to DBFS root and DBFS mounts is @@ -1521,6 +1623,18 @@ type SettingsAPI struct { // all DBFS functionality is enabled disableLegacyDbfs DisableLegacyDbfsInterface + // Controls whether users can export notebooks and files from the Workspace. + // By default, this setting is enabled. + enableExportNotebook EnableExportNotebookInterface + + // Controls whether users can copy tabular data to the clipboard via the UI. + // By default, this setting is enabled. + enableNotebookTableClipboard EnableNotebookTableClipboardInterface + + // Controls whether users can download notebook results. By default, this + // setting is enabled. 
+ enableResultsDownloading EnableResultsDownloadingInterface + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. However, if the @@ -1574,6 +1688,18 @@ func (a *SettingsAPI) DisableLegacyDbfs() DisableLegacyDbfsInterface { return a.disableLegacyDbfs } +func (a *SettingsAPI) EnableExportNotebook() EnableExportNotebookInterface { + return a.enableExportNotebook +} + +func (a *SettingsAPI) EnableNotebookTableClipboard() EnableNotebookTableClipboardInterface { + return a.enableNotebookTableClipboard +} + +func (a *SettingsAPI) EnableResultsDownloading() EnableResultsDownloadingInterface { + return a.enableResultsDownloading +} + func (a *SettingsAPI) EnhancedSecurityMonitoring() EnhancedSecurityMonitoringInterface { return a.enhancedSecurityMonitoring } diff --git a/service/settings/impl.go b/service/settings/impl.go index ec11fe6ac..7590bdf0d 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -422,6 +422,32 @@ func (a *disableLegacyFeaturesImpl) Update(ctx context.Context, request UpdateDi return &disableLegacyFeatures, err } +// unexported type that holds implementations of just EnableExportNotebook API methods +type enableExportNotebookImpl struct { + client *client.DatabricksClient +} + +func (a *enableExportNotebookImpl) GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) { + var enableExportNotebook EnableExportNotebook + path := "/api/2.0/settings/types/enable-export-notebook/names/default" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &enableExportNotebook) + return &enableExportNotebook, err +} + +func (a *enableExportNotebookImpl) PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) { + var 
enableExportNotebook EnableExportNotebook + path := "/api/2.0/settings/types/enable-export-notebook/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &enableExportNotebook) + return &enableExportNotebook, err +} + // unexported type that holds implementations of just EnableIpAccessLists API methods type enableIpAccessListsImpl struct { client *client.DatabricksClient @@ -458,6 +484,58 @@ func (a *enableIpAccessListsImpl) Update(ctx context.Context, request UpdateAcco return &accountIpAccessEnable, err } +// unexported type that holds implementations of just EnableNotebookTableClipboard API methods +type enableNotebookTableClipboardImpl struct { + client *client.DatabricksClient +} + +func (a *enableNotebookTableClipboardImpl) GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) { + var enableNotebookTableClipboard EnableNotebookTableClipboard + path := "/api/2.0/settings/types/enable-notebook-table-clipboard/names/default" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &enableNotebookTableClipboard) + return &enableNotebookTableClipboard, err +} + +func (a *enableNotebookTableClipboardImpl) PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) { + var enableNotebookTableClipboard EnableNotebookTableClipboard + path := "/api/2.0/settings/types/enable-notebook-table-clipboard/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, 
&enableNotebookTableClipboard) + return &enableNotebookTableClipboard, err +} + +// unexported type that holds implementations of just EnableResultsDownloading API methods +type enableResultsDownloadingImpl struct { + client *client.DatabricksClient +} + +func (a *enableResultsDownloadingImpl) GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) { + var enableResultsDownloading EnableResultsDownloading + path := "/api/2.0/settings/types/enable-results-downloading/names/default" + + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, nil, nil, &enableResultsDownloading) + return &enableResultsDownloading, err +} + +func (a *enableResultsDownloadingImpl) PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) { + var enableResultsDownloading EnableResultsDownloading + path := "/api/2.0/settings/types/enable-results-downloading/names/default" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &enableResultsDownloading) + return &enableResultsDownloading, err +} + // unexported type that holds implementations of just EnhancedSecurityMonitoring API methods type enhancedSecurityMonitoringImpl struct { client *client.DatabricksClient diff --git a/service/settings/interface.go b/service/settings/interface.go index cb38b4641..f5faae367 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -275,11 +275,10 @@ type DefaultNamespaceService interface { // 'Disabling legacy access' has the following impacts: // -// 1. Disables direct access to the Hive Metastore. However, you can still -// access Hive Metastore through HMS Federation. 2. 
Disables Fallback Mode (docs -// link) on any External Location access from the workspace. 3. Alters DBFS path -// access to use External Location permissions in place of legacy credentials. -// 4. Enforces Unity Catalog access on all path based access. +// 1. Disables direct access to Hive Metastores from the workspace. However, you +// can still access a Hive Metastore through Hive Metastore federation. 2. +// Disables fallback mode on external location access from the workspace. 3. +// Disables Databricks Runtime versions prior to 13.3LTS. type DisableLegacyAccessService interface { // Delete Legacy Access Disablement Status. @@ -344,6 +343,23 @@ type DisableLegacyFeaturesService interface { Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) } +// Controls whether users can export notebooks and files from the Workspace. By +// default, this setting is enabled. +type EnableExportNotebookService interface { + + // Get the Enable Export Notebook setting. + // + // Gets the Enable Export Notebook setting. + GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) + + // Update the Enable Export Notebook setting. + // + // Updates the Enable Export Notebook setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. + PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) +} + // Controls the enforcement of IP access lists for accessing the account // console. Allowing you to enable or disable restricted access based on IP // addresses. @@ -365,6 +381,40 @@ type EnableIpAccessListsService interface { Update(ctx context.Context, request UpdateAccountIpAccessEnableRequest) (*AccountIpAccessEnable, error) } +// Controls whether users can copy tabular data to the clipboard via the UI. By +// default, this setting is enabled. 
+type EnableNotebookTableClipboardService interface { + + // Get the Enable Notebook Table Clipboard setting. + // + // Gets the Enable Notebook Table Clipboard setting. + GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) + + // Update the Enable Notebook Table Clipboard setting. + // + // Updates the Enable Notebook Table Clipboard setting. The model follows + // eventual consistency, which means the get after the update operation + // might receive stale values for some time. + PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) +} + +// Controls whether users can download notebook results. By default, this +// setting is enabled. +type EnableResultsDownloadingService interface { + + // Get the Enable Results Downloading setting. + // + // Gets the Enable Results Downloading setting. + GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) + + // Update the Enable Results Downloading setting. + // + // Updates the Enable Results Downloading setting. The model follows + // eventual consistency, which means the get after the update operation + // might receive stale values for some time. + PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) +} + // Controls whether enhanced security monitoring is enabled for the current // workspace. If the compliance security profile is enabled, this is // automatically enabled. By default, it is disabled. 
However, if the compliance diff --git a/service/settings/model.go b/service/settings/model.go index f05607616..39daaf756 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -1422,6 +1422,66 @@ type EmailConfig struct { type Empty struct { } +type EnableExportNotebook struct { + BooleanVal *BooleanMessage `json:"boolean_val,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *EnableExportNotebook) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnableExportNotebook) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnableNotebookTableClipboard struct { + BooleanVal *BooleanMessage `json:"boolean_val,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *EnableNotebookTableClipboard) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnableNotebookTableClipboard) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type EnableResultsDownloading struct { + BooleanVal *BooleanMessage `json:"boolean_val,omitempty"` + // Name of the corresponding setting. This field is populated in the + // response, but it will not be respected even if it's set in the request + // body. 
The setting name in the path parameter will be respected instead. + // Setting name is required to be 'default' if the setting only has one + // instance per workspace. + SettingName string `json:"setting_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *EnableResultsDownloading) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s EnableResultsDownloading) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // SHIELD feature: ESM type EnhancedSecurityMonitoring struct { IsEnabled bool `json:"is_enabled,omitempty"` @@ -3110,6 +3170,69 @@ type UpdateDisableLegacyFeaturesRequest struct { Setting DisableLegacyFeatures `json:"setting"` } +// Details required to update a setting. +type UpdateEnableExportNotebookRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EnableExportNotebook `json:"setting"` +} + +// Details required to update a setting. +type UpdateEnableNotebookTableClipboardRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. 
+ AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EnableNotebookTableClipboard `json:"setting"` +} + +// Details required to update a setting. +type UpdateEnableResultsDownloadingRequest struct { + // This should always be set to true for Settings API. Added for AIP + // compliance. + AllowMissing bool `json:"allow_missing"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + FieldMask string `json:"field_mask"` + + Setting EnableResultsDownloading `json:"setting"` +} + // Details required to update a setting. type UpdateEnhancedSecurityMonitoringSettingRequest struct { // This should always be set to true for Settings API. 
Added for AIP diff --git a/service/sql/model.go b/service/sql/model.go index 3d6336bf0..a74f2de93 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -3163,6 +3163,11 @@ type QueryFilter struct { type QueryInfo struct { // SQL Warehouse channel information at the time of query execution ChannelUsed *ChannelInfo `json:"channel_used,omitempty"` + // Client application that ran the statement. For example: Databricks SQL + // Editor, Tableau, and Power BI. This field is derived from information + // provided by client applications. While values are expected to remain + // static over time, this cannot be guaranteed. + ClientApplication string `json:"client_application,omitempty"` // Total execution time of the statement ( excluding result fetch time ). Duration int64 `json:"duration,omitempty"` // Alias for `warehouse_id`. @@ -4682,6 +4687,8 @@ const WarehousePermissionLevelCanMonitor WarehousePermissionLevel = `CAN_MONITOR const WarehousePermissionLevelCanUse WarehousePermissionLevel = `CAN_USE` +const WarehousePermissionLevelCanView WarehousePermissionLevel = `CAN_VIEW` + const WarehousePermissionLevelIsOwner WarehousePermissionLevel = `IS_OWNER` // String representation for [fmt.Print] @@ -4692,11 +4699,11 @@ func (f *WarehousePermissionLevel) String() string { // Set raw string value and validate it against allowed values func (f *WarehousePermissionLevel) Set(v string) error { switch v { - case `CAN_MANAGE`, `CAN_MONITOR`, `CAN_USE`, `IS_OWNER`: + case `CAN_MANAGE`, `CAN_MONITOR`, `CAN_USE`, `CAN_VIEW`, `IS_OWNER`: *f = WarehousePermissionLevel(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MONITOR", "CAN_USE", "IS_OWNER"`, v) + return fmt.Errorf(`value "%s" is not one of "CAN_MANAGE", "CAN_MONITOR", "CAN_USE", "CAN_VIEW", "IS_OWNER"`, v) } } From 68f3330bbad45d4e5bb85e8be93285ca4c48ae90 Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Mon, 
14 Apr 2025 14:27:12 +0000 Subject: [PATCH 39/54] [Release] Release v0.63.0 ## Release v0.63.0 ### API Changes * Added [w.EnableExportNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableExportNotebookAPI) workspace-level service, [w.EnableNotebookTableClipboard](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableNotebookTableClipboardAPI) workspace-level service and [w.EnableResultsDownloading](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableResultsDownloadingAPI) workspace-level service. * Added `GetCredentialsForTraceDataDownload` and `GetCredentialsForTraceDataUpload` methods for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service. * Added `GetDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. * Added `GetPublishedDashboardTokenInfo` method for [w.LakeviewEmbedded](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewEmbeddedAPI) workspace-level service. * Added `BindingWorkspaceIds` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy). * Added `DownloadId` field for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). * Added `DashboardOutput` field for [jobs.RunOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunOutput). * Added `DashboardTask` and `PowerBiTask` fields for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). * Added `DashboardTask` and `PowerBiTask` fields for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). 
* Added `DashboardTask` and `PowerBiTask` fields for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). * Added `IncludeFeatures` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). * Added `Models` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs). * Added `DatasetDigest`, `DatasetName` and `ModelId` fields for [ml.LogMetric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogMetric). * Added `DatasetDigest`, `DatasetName`, `ModelId` and `RunId` fields for [ml.Metric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Metric). * Added `ModelInputs` field for [ml.RunInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInputs). * Added `ClientApplication` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). * Added `Geography` and `Geometry` enum values for [catalog.ColumnTypeName](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ColumnTypeName). * Added `AllocationTimeoutNoHealthyAndWarmedUpClusters`, `DockerContainerCreationException`, `DockerImageTooLargeForInstanceException` and `DockerInvalidOsException` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). * Added `Standard` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). * Added `CanView` enum value for [sql.WarehousePermissionLevel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#WarehousePermissionLevel). 
* [Breaking] Changed `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. Method path has changed. * [Breaking] Changed waiter for [CommandExecutionAPI.Create](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Create). * [Breaking] Changed waiter for [CommandExecutionAPI.Execute](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Execute). * [Breaking] Removed `Error`, `Status` and `TransientStatementId` fields for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). * [Breaking] Removed `Balanced` and `CostOptimized` enum values for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). * [Breaking] Removed [PipelinesAPI.WaitGetPipelineRunning](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelinesAPI.WaitGetPipelineRunning) method.
--- .release_metadata.json | 2 +- CHANGELOG.md | 31 +++++++++++++++++++++++++++++++ NEXT_CHANGELOG.md | 28 +--------------------------- version/version.go | 2 +- 4 files changed, 34 insertions(+), 29 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 73ae17128..a7b6137c2 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-04-10 13:33:22+0000" + "timestamp": "2025-04-14 14:27:08+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 6e1babf14..946ad577e 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,36 @@ # Version changelog +## Release v0.63.0 + +### API Changes +* Added [w.EnableExportNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableExportNotebookAPI) workspace-level service, [w.EnableNotebookTableClipboard](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableNotebookTableClipboardAPI) workspace-level service and [w.EnableResultsDownloading](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableResultsDownloadingAPI) workspace-level service. +* Added `GetCredentialsForTraceDataDownload` and `GetCredentialsForTraceDataUpload` methods for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service. +* Added `GetDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. +* Added `GetPublishedDashboardTokenInfo` method for [w.LakeviewEmbedded](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewEmbeddedAPI) workspace-level service. +* Added `BindingWorkspaceIds` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy). 
+* Added `DownloadId` field for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). +* Added `DashboardOutput` field for [jobs.RunOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunOutput). +* Added `DashboardTask` and `PowerBiTask` fields for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). +* Added `DashboardTask` and `PowerBiTask` fields for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). +* Added `DashboardTask` and `PowerBiTask` fields for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). +* Added `IncludeFeatures` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). +* Added `Models` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs). +* Added `DatasetDigest`, `DatasetName` and `ModelId` fields for [ml.LogMetric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogMetric). +* Added `DatasetDigest`, `DatasetName`, `ModelId` and `RunId` fields for [ml.Metric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Metric). +* Added `ModelInputs` field for [ml.RunInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInputs). +* Added `ClientApplication` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). +* Added `Geography` and `Geometry` enum values for [catalog.ColumnTypeName](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ColumnTypeName). 
+* Added `AllocationTimeoutNoHealthyAndWarmedUpClusters`, `DockerContainerCreationException`, `DockerImageTooLargeForInstanceException` and `DockerInvalidOsException` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* Added `Standard` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). +* Added `CanView` enum value for [sql.WarehousePermissionLevel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#WarehousePermissionLevel). +* [Breaking] Changed `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service . Method path has changed. +* [Breaking] Changed waiter for [CommandExecutionAPI.Create](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Create). +* [Breaking] Changed waiter for [CommandExecutionAPI.Execute](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Execute). +* [Breaking] Removed `Error`, `Status` and `TransientStatementId` fields for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). +* [Breaking] Removed `Balanced` and `CostOptimized` enum values for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). +* [Breaking] Removed [PipelinesAPI.WaitGetPipelineRunning](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelinesAPI.WaitGetPipelineRunning) method. 
+ + ## Release v0.62.0 ### Internal Changes diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 68b9d1c6e..bcf084760 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,6 +1,6 @@ # NEXT CHANGELOG -## Release v0.63.0 +## Release v0.64.0 ### New Features and Improvements @@ -11,29 +11,3 @@ ### Internal Changes ### API Changes -* Added [w.EnableExportNotebook](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableExportNotebookAPI) workspace-level service, [w.EnableNotebookTableClipboard](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableNotebookTableClipboardAPI) workspace-level service and [w.EnableResultsDownloading](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EnableResultsDownloadingAPI) workspace-level service. -* Added `GetCredentialsForTraceDataDownload` and `GetCredentialsForTraceDataUpload` methods for [w.Experiments](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#ExperimentsAPI) workspace-level service. -* Added `GetDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service. -* Added `GetPublishedDashboardTokenInfo` method for [w.LakeviewEmbedded](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#LakeviewEmbeddedAPI) workspace-level service. -* Added `BindingWorkspaceIds` field for [billing.BudgetPolicy](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/billing#BudgetPolicy). -* Added `DownloadId` field for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). -* Added `DashboardOutput` field for [jobs.RunOutput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunOutput). 
-* Added `DashboardTask` and `PowerBiTask` fields for [jobs.RunTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RunTask). -* Added `DashboardTask` and `PowerBiTask` fields for [jobs.SubmitTask](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#SubmitTask). -* Added `DashboardTask` and `PowerBiTask` fields for [jobs.Task](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#Task). -* Added `IncludeFeatures` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). -* Added `Models` field for [ml.LogInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogInputs). -* Added `DatasetDigest`, `DatasetName` and `ModelId` fields for [ml.LogMetric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#LogMetric). -* Added `DatasetDigest`, `DatasetName`, `ModelId` and `RunId` fields for [ml.Metric](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#Metric). -* Added `ModelInputs` field for [ml.RunInputs](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#RunInputs). -* Added `ClientApplication` field for [sql.QueryInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#QueryInfo). -* Added `Geography` and `Geometry` enum values for [catalog.ColumnTypeName](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#ColumnTypeName). -* Added `AllocationTimeoutNoHealthyAndWarmedUpClusters`, `DockerContainerCreationException`, `DockerImageTooLargeForInstanceException` and `DockerInvalidOsException` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). -* Added `Standard` enum value for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). 
-* Added `CanView` enum value for [sql.WarehousePermissionLevel](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#WarehousePermissionLevel). -* [Breaking] Changed `GenerateDownloadFullQueryResult` method for [w.Genie](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI) workspace-level service . Method path has changed. -* [Breaking] Changed waiter for [CommandExecutionAPI.Create](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Create). -* [Breaking] Changed waiter for [CommandExecutionAPI.Execute](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#CommandExecutionAPI.Execute). -* [Breaking] Removed `Error`, `Status` and `TransientStatementId` fields for [dashboards.GenieGenerateDownloadFullQueryResultResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieGenerateDownloadFullQueryResultResponse). -* [Breaking] Removed `Balanced` and `CostOptimized` enum values for [jobs.PerformanceTarget](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#PerformanceTarget). -* [Breaking] Removed [PipelinesAPI.WaitGetPipelineRunning](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelinesAPI.WaitGetPipelineRunning) method. diff --git a/version/version.go b/version/version.go index 20a1e06cc..0419e2713 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.62.0" +const Version = "0.63.0" From 68499d87ae3838d9ea5590f0080f5102d4c81e02 Mon Sep 17 00:00:00 2001 From: Omer Lachish <289488+rauchy@users.noreply.github.com> Date: Wed, 16 Apr 2025 10:02:05 +0200 Subject: [PATCH 40/54] Fix example test names to comply with Go 1.24 vet checks (#1203) ## What changes are proposed in this pull request? 
This PR updates the names of many `Example...` functions in `*_usage_test.go` files across multiple service packages to comply with stricter vet rules introduced in Go 1.24. - **WHAT**: Renamed example functions so that their names match real exported identifiers. - **WHY**: In Go 1.24, the `tests` analyzer in `go vet` enforces that example function names must map to valid exported identifiers. These stricter checks caused previously valid tests to fail during `go test`, even though the actual test logic remained unchanged. ## How is this tested? Validated by running `make test` locally with Go 1.24. All example tests now pass without build or vet errors. NO_CHANGELOG=true Co-authored-by: Omer Lachish --- service/billing/budgets_usage_test.go | 2 +- service/billing/log_delivery_usage_test.go | 2 +- service/catalog/catalogs_usage_test.go | 2 +- service/catalog/connections_usage_test.go | 2 +- service/catalog/external_locations_usage_test.go | 2 +- service/catalog/metastore_assignments_usage_test.go | 2 +- service/catalog/metastores_usage_test.go | 2 +- service/catalog/schemas_usage_test.go | 2 +- service/catalog/storage_credentials_usage_test.go | 2 +- service/catalog/tables_usage_test.go | 2 +- service/catalog/volumes_usage_test.go | 2 +- service/compute/cluster_policies_usage_test.go | 2 +- service/compute/clusters_usage_test.go | 2 +- service/compute/global_init_scripts_usage_test.go | 2 +- service/compute/instance_pools_usage_test.go | 2 +- service/compute/instance_profiles_usage_test.go | 2 +- service/compute/libraries_usage_test.go | 2 +- service/compute/policy_families_usage_test.go | 2 +- service/iam/service_principals_usage_test.go | 4 ++-- service/iam/users_usage_test.go | 2 +- service/iam/workspace_assignment_usage_test.go | 2 +- service/jobs/jobs_usage_test.go | 2 +- service/provisioning/credentials_usage_test.go | 2 +- service/provisioning/encryption_keys_usage_test.go | 2 +- service/provisioning/networks_usage_test.go | 2 +- 
service/provisioning/private_access_usage_test.go | 2 +- service/provisioning/storage_usage_test.go | 2 +- service/provisioning/vpc_endpoints_usage_test.go | 2 +- service/provisioning/workspaces_usage_test.go | 2 +- service/settings/ip_access_lists_usage_test.go | 2 +- service/settings/token_management_usage_test.go | 2 +- service/settings/tokens_usage_test.go | 4 ++-- service/sharing/providers_usage_test.go | 2 +- service/sharing/recipients_usage_test.go | 2 +- service/sharing/shares_usage_test.go | 2 +- service/sql/alerts_usage_test.go | 2 +- service/sql/dashboards_usage_test.go | 2 +- service/sql/data_sources_usage_test.go | 4 ++-- service/sql/query_history_usage_test.go | 2 +- service/sql/statement_execution_usage_test.go | 4 ++-- service/sql/warehouses_usage_test.go | 2 +- service/workspace/git_credentials_usage_test.go | 2 +- service/workspace/repos_usage_test.go | 2 +- service/workspace/workspace_usage_test.go | 2 +- 44 files changed, 48 insertions(+), 48 deletions(-) diff --git a/service/billing/budgets_usage_test.go b/service/billing/budgets_usage_test.go index da7b6cd8e..e0171227c 100755 --- a/service/billing/budgets_usage_test.go +++ b/service/billing/budgets_usage_test.go @@ -109,7 +109,7 @@ func ExampleBudgetsAPI_Get_budgets() { } -func ExampleBudgetsAPI_ListAll_budgets() { +func ExampleBudgetsAPI_List_budgets() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git a/service/billing/log_delivery_usage_test.go b/service/billing/log_delivery_usage_test.go index c5e9719f2..f9b100c88 100755 --- a/service/billing/log_delivery_usage_test.go +++ b/service/billing/log_delivery_usage_test.go @@ -151,7 +151,7 @@ func ExampleLogDeliveryAPI_Get_logDelivery() { } -func ExampleLogDeliveryAPI_ListAll_logDelivery() { +func ExampleLogDeliveryAPI_List_logDelivery() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git a/service/catalog/catalogs_usage_test.go 
b/service/catalog/catalogs_usage_test.go index ba634b8de..9879024aa 100755 --- a/service/catalog/catalogs_usage_test.go +++ b/service/catalog/catalogs_usage_test.go @@ -207,7 +207,7 @@ func ExampleCatalogsAPI_Get_catalogs() { } -func ExampleCatalogsAPI_ListAll_catalogs() { +func ExampleCatalogsAPI_List_catalogs() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/catalog/connections_usage_test.go b/service/catalog/connections_usage_test.go index 0dbe892ff..8888d368c 100755 --- a/service/catalog/connections_usage_test.go +++ b/service/catalog/connections_usage_test.go @@ -88,7 +88,7 @@ func ExampleConnectionsAPI_Get_connections() { } -func ExampleConnectionsAPI_ListAll_connections() { +func ExampleConnectionsAPI_List_connections() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/catalog/external_locations_usage_test.go b/service/catalog/external_locations_usage_test.go index abfaddb64..937c09d48 100755 --- a/service/catalog/external_locations_usage_test.go +++ b/service/catalog/external_locations_usage_test.go @@ -144,7 +144,7 @@ func ExampleExternalLocationsAPI_Get_externalLocationsOnAws() { } -func ExampleExternalLocationsAPI_ListAll_externalLocationsOnAws() { +func ExampleExternalLocationsAPI_List_externalLocationsOnAws() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/catalog/metastore_assignments_usage_test.go b/service/catalog/metastore_assignments_usage_test.go index b3c5b85c4..a1f6e075a 100755 --- a/service/catalog/metastore_assignments_usage_test.go +++ b/service/catalog/metastore_assignments_usage_test.go @@ -12,7 +12,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/catalog" ) -func ExampleMetastoreAssignmentsAPI_ListAll_metastoreAssignments() { +func ExampleAccountMetastoreAssignmentsAPI_ListAll_metastoreAssignments() { ctx := context.Background() a, err := 
databricks.NewAccountClient() if err != nil { diff --git a/service/catalog/metastores_usage_test.go b/service/catalog/metastores_usage_test.go index 4c3752dfe..8304b2bbd 100755 --- a/service/catalog/metastores_usage_test.go +++ b/service/catalog/metastores_usage_test.go @@ -135,7 +135,7 @@ func ExampleMetastoresAPI_Get_metastores() { } -func ExampleMetastoresAPI_ListAll_metastores() { +func ExampleMetastoresAPI_List_metastores() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/catalog/schemas_usage_test.go b/service/catalog/schemas_usage_test.go index 4807b237b..53b9f0900 100755 --- a/service/catalog/schemas_usage_test.go +++ b/service/catalog/schemas_usage_test.go @@ -218,7 +218,7 @@ func ExampleSchemasAPI_Get_schemas() { } -func ExampleSchemasAPI_ListAll_schemas() { +func ExampleSchemasAPI_List_schemas() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/catalog/storage_credentials_usage_test.go b/service/catalog/storage_credentials_usage_test.go index 132ff8c65..a632fefb1 100755 --- a/service/catalog/storage_credentials_usage_test.go +++ b/service/catalog/storage_credentials_usage_test.go @@ -129,7 +129,7 @@ func ExampleStorageCredentialsAPI_Get_storageCredentialsOnAws() { } -func ExampleStorageCredentialsAPI_ListAll_storageCredentialsOnAws() { +func ExampleStorageCredentialsAPI_List_storageCredentialsOnAws() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/catalog/tables_usage_test.go b/service/catalog/tables_usage_test.go index 6bfc77296..f33525d67 100755 --- a/service/catalog/tables_usage_test.go +++ b/service/catalog/tables_usage_test.go @@ -79,7 +79,7 @@ func ExampleTablesAPI_Get_tables() { } -func ExampleTablesAPI_ListAll_tables() { +func ExampleTablesAPI_List_tables() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git 
a/service/catalog/volumes_usage_test.go b/service/catalog/volumes_usage_test.go index ddbfdce42..be1bf352f 100755 --- a/service/catalog/volumes_usage_test.go +++ b/service/catalog/volumes_usage_test.go @@ -101,7 +101,7 @@ func ExampleVolumesAPI_Create_volumes() { } -func ExampleVolumesAPI_ListAll_volumes() { +func ExampleVolumesAPI_List_volumes() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/compute/cluster_policies_usage_test.go b/service/compute/cluster_policies_usage_test.go index 207077560..9e205618b 100755 --- a/service/compute/cluster_policies_usage_test.go +++ b/service/compute/cluster_policies_usage_test.go @@ -133,7 +133,7 @@ func ExampleClusterPoliciesAPI_Get_clusterPolicies() { } -func ExampleClusterPoliciesAPI_ListAll_clusterPolicies() { +func ExampleClusterPoliciesAPI_List_clusterPolicies() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/compute/clusters_usage_test.go b/service/compute/clusters_usage_test.go index d2b1e6aed..6747273e0 100755 --- a/service/compute/clusters_usage_test.go +++ b/service/compute/clusters_usage_test.go @@ -335,7 +335,7 @@ func ExampleClustersAPI_Get_clustersApiIntegration() { } -func ExampleClustersAPI_ListAll_clustersApiIntegration() { +func ExampleClustersAPI_List_clustersApiIntegration() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/compute/global_init_scripts_usage_test.go b/service/compute/global_init_scripts_usage_test.go index 01f1b79af..afb7f380a 100755 --- a/service/compute/global_init_scripts_usage_test.go +++ b/service/compute/global_init_scripts_usage_test.go @@ -74,7 +74,7 @@ func ExampleGlobalInitScriptsAPI_Get_globalInitScripts() { } -func ExampleGlobalInitScriptsAPI_ListAll_globalInitScripts() { +func ExampleGlobalInitScriptsAPI_List_globalInitScripts() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() 
if err != nil { diff --git a/service/compute/instance_pools_usage_test.go b/service/compute/instance_pools_usage_test.go index e2dfe9ba1..5d08fb188 100755 --- a/service/compute/instance_pools_usage_test.go +++ b/service/compute/instance_pools_usage_test.go @@ -127,7 +127,7 @@ func ExampleInstancePoolsAPI_Get_instancePools() { } -func ExampleInstancePoolsAPI_ListAll_instancePools() { +func ExampleInstancePoolsAPI_List_instancePools() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/compute/instance_profiles_usage_test.go b/service/compute/instance_profiles_usage_test.go index d85aa3ab9..44823c7e0 100755 --- a/service/compute/instance_profiles_usage_test.go +++ b/service/compute/instance_profiles_usage_test.go @@ -50,7 +50,7 @@ func ExampleInstanceProfilesAPI_Edit_awsInstanceProfiles() { } -func ExampleInstanceProfilesAPI_ListAll_awsInstanceProfiles() { +func ExampleInstanceProfilesAPI_List_awsInstanceProfiles() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/compute/libraries_usage_test.go b/service/compute/libraries_usage_test.go index 538b4c827..4351b1d00 100755 --- a/service/compute/libraries_usage_test.go +++ b/service/compute/libraries_usage_test.go @@ -11,7 +11,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/compute" ) -func ExampleLibrariesAPI_Update_libraries() { +func ExampleLibrariesAPI_UpdateAndWait_libraries() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/compute/policy_families_usage_test.go b/service/compute/policy_families_usage_test.go index 761fd125f..43f21080c 100755 --- a/service/compute/policy_families_usage_test.go +++ b/service/compute/policy_families_usage_test.go @@ -34,7 +34,7 @@ func ExamplePolicyFamiliesAPI_Get_clusterPolicyFamilies() { } -func ExamplePolicyFamiliesAPI_ListAll_clusterPolicyFamilies() { +func 
ExamplePolicyFamiliesAPI_List_clusterPolicyFamilies() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/iam/service_principals_usage_test.go b/service/iam/service_principals_usage_test.go index 159ecfc91..7ec3da4aa 100755 --- a/service/iam/service_principals_usage_test.go +++ b/service/iam/service_principals_usage_test.go @@ -177,7 +177,7 @@ func ExampleServicePrincipalsAPI_Get_servicePrincipalsOnAws() { } -func ExampleServicePrincipalsAPI_ListAll_accountServicePrincipal() { +func ExampleServicePrincipalsAPI_List_accountServicePrincipal() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { @@ -218,7 +218,7 @@ func ExampleServicePrincipalsAPI_ListAll_accountServicePrincipal() { } -func ExampleServicePrincipalsAPI_ListAll_servicePrincipalsOnAws() { +func ExampleServicePrincipalsAPI_List_servicePrincipalsOnAws() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/iam/users_usage_test.go b/service/iam/users_usage_test.go index c1326b45e..f7eb97825 100755 --- a/service/iam/users_usage_test.go +++ b/service/iam/users_usage_test.go @@ -203,7 +203,7 @@ func ExampleUsersAPI_Get_accountUsers() { } -func ExampleUsersAPI_ListAll_workspaceUsers() { +func ExampleUsersAPI_List_workspaceUsers() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/iam/workspace_assignment_usage_test.go b/service/iam/workspace_assignment_usage_test.go index 3ea024d3f..8b8f32d02 100755 --- a/service/iam/workspace_assignment_usage_test.go +++ b/service/iam/workspace_assignment_usage_test.go @@ -15,7 +15,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/iam" ) -func ExampleWorkspaceAssignmentAPI_ListAll_workspaceAssignmentOnAws() { +func ExampleWorkspaceAssignmentAPI_List_workspaceAssignmentOnAws() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff 
--git a/service/jobs/jobs_usage_test.go b/service/jobs/jobs_usage_test.go index d05292d5b..8e3e690f8 100755 --- a/service/jobs/jobs_usage_test.go +++ b/service/jobs/jobs_usage_test.go @@ -365,7 +365,7 @@ func ExampleJobsAPI_GetRunOutput_jobsApiFullIntegration() { } -func ExampleJobsAPI_ListAll_jobsApiFullIntegration() { +func ExampleJobsAPI_List_jobsApiFullIntegration() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/provisioning/credentials_usage_test.go b/service/provisioning/credentials_usage_test.go index a3da09222..ae6e15797 100755 --- a/service/provisioning/credentials_usage_test.go +++ b/service/provisioning/credentials_usage_test.go @@ -136,7 +136,7 @@ func ExampleCredentialsAPI_Get_credentials() { } -func ExampleCredentialsAPI_ListAll_credentials() { +func ExampleCredentialsAPI_List_credentials() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git a/service/provisioning/encryption_keys_usage_test.go b/service/provisioning/encryption_keys_usage_test.go index 9b7918dfd..6730f9d3c 100755 --- a/service/provisioning/encryption_keys_usage_test.go +++ b/service/provisioning/encryption_keys_usage_test.go @@ -74,7 +74,7 @@ func ExampleEncryptionKeysAPI_Get_encryptionKeys() { } -func ExampleEncryptionKeysAPI_ListAll_encryptionKeys() { +func ExampleEncryptionKeysAPI_List_encryptionKeys() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git a/service/provisioning/networks_usage_test.go b/service/provisioning/networks_usage_test.go index bf3f34c4d..35acbc57d 100755 --- a/service/provisioning/networks_usage_test.go +++ b/service/provisioning/networks_usage_test.go @@ -59,7 +59,7 @@ func ExampleNetworksAPI_Get_networks() { } -func ExampleNetworksAPI_ListAll_networks() { +func ExampleNetworksAPI_List_networks() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git 
a/service/provisioning/private_access_usage_test.go b/service/provisioning/private_access_usage_test.go index 13d89ec7e..c15d96d9f 100755 --- a/service/provisioning/private_access_usage_test.go +++ b/service/provisioning/private_access_usage_test.go @@ -70,7 +70,7 @@ func ExamplePrivateAccessAPI_Get_privateAccess() { } -func ExamplePrivateAccessAPI_ListAll_privateAccess() { +func ExamplePrivateAccessAPI_List_privateAccess() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git a/service/provisioning/storage_usage_test.go b/service/provisioning/storage_usage_test.go index 0c2738918..c2ca34d13 100755 --- a/service/provisioning/storage_usage_test.go +++ b/service/provisioning/storage_usage_test.go @@ -114,7 +114,7 @@ func ExampleStorageAPI_Get_storage() { } -func ExampleStorageAPI_ListAll_storage() { +func ExampleStorageAPI_List_storage() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git a/service/provisioning/vpc_endpoints_usage_test.go b/service/provisioning/vpc_endpoints_usage_test.go index c13806d6a..1a776259b 100755 --- a/service/provisioning/vpc_endpoints_usage_test.go +++ b/service/provisioning/vpc_endpoints_usage_test.go @@ -72,7 +72,7 @@ func ExampleVpcEndpointsAPI_Get_vpcEndpoints() { } -func ExampleVpcEndpointsAPI_ListAll_vpcEndpoints() { +func ExampleVpcEndpointsAPI_List_vpcEndpoints() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git a/service/provisioning/workspaces_usage_test.go b/service/provisioning/workspaces_usage_test.go index e50e4c98c..813c40ca5 100755 --- a/service/provisioning/workspaces_usage_test.go +++ b/service/provisioning/workspaces_usage_test.go @@ -138,7 +138,7 @@ func ExampleWorkspacesAPI_Get_workspaces() { } -func ExampleWorkspacesAPI_ListAll_workspaces() { +func ExampleWorkspacesAPI_List_workspaces() { ctx := context.Background() a, err := databricks.NewAccountClient() if err != nil { diff --git 
a/service/settings/ip_access_lists_usage_test.go b/service/settings/ip_access_lists_usage_test.go index 55aa19fc1..e8359b696 100755 --- a/service/settings/ip_access_lists_usage_test.go +++ b/service/settings/ip_access_lists_usage_test.go @@ -71,7 +71,7 @@ func ExampleIpAccessListsAPI_Get_ipAccessLists() { } -func ExampleIpAccessListsAPI_ListAll_ipAccessLists() { +func ExampleIpAccessListsAPI_List_ipAccessLists() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/settings/token_management_usage_test.go b/service/settings/token_management_usage_test.go index cdc1baccb..388b4a7dc 100755 --- a/service/settings/token_management_usage_test.go +++ b/service/settings/token_management_usage_test.go @@ -112,7 +112,7 @@ func ExampleTokenManagementAPI_Get_createOboTokenOnAws() { } -func ExampleTokenManagementAPI_ListAll_createOboTokenOnAws() { +func ExampleTokenManagementAPI_List_createOboTokenOnAws() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/settings/tokens_usage_test.go b/service/settings/tokens_usage_test.go index ccd8983fe..190d0646c 100755 --- a/service/settings/tokens_usage_test.go +++ b/service/settings/tokens_usage_test.go @@ -38,7 +38,7 @@ func ExampleTokensAPI_Create_tokens() { } -func ExampleTokensAPI_Get_tokens() { +func ExampleTokensAPI_GetByComment_tokens() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { @@ -69,7 +69,7 @@ func ExampleTokensAPI_Get_tokens() { } -func ExampleTokensAPI_ListAll_tokens() { +func ExampleTokensAPI_List_tokens() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/sharing/providers_usage_test.go b/service/sharing/providers_usage_test.go index d8ce5613e..9000a6abf 100755 --- a/service/sharing/providers_usage_test.go +++ b/service/sharing/providers_usage_test.go @@ -82,7 +82,7 @@ func ExampleProvidersAPI_Get_providers() { 
} -func ExampleProvidersAPI_ListAll_providers() { +func ExampleProvidersAPI_List_providers() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/sharing/recipients_usage_test.go b/service/sharing/recipients_usage_test.go index 0d312454c..f01913bd3 100755 --- a/service/sharing/recipients_usage_test.go +++ b/service/sharing/recipients_usage_test.go @@ -66,7 +66,7 @@ func ExampleRecipientsAPI_Get_recipients() { } -func ExampleRecipientsAPI_ListAll_recipients() { +func ExampleRecipientsAPI_List_recipients() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/sharing/shares_usage_test.go b/service/sharing/shares_usage_test.go index fe2015082..e3fdfc802 100755 --- a/service/sharing/shares_usage_test.go +++ b/service/sharing/shares_usage_test.go @@ -69,7 +69,7 @@ func ExampleSharesAPI_Get_shares() { } -func ExampleSharesAPI_ListAll_shares() { +func ExampleSharesAPI_List_shares() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/sql/alerts_usage_test.go b/service/sql/alerts_usage_test.go index f919e7a11..e8156a24b 100755 --- a/service/sql/alerts_usage_test.go +++ b/service/sql/alerts_usage_test.go @@ -145,7 +145,7 @@ func ExampleAlertsAPI_Get_alerts() { } -func ExampleAlertsAPI_ListAll_alerts() { +func ExampleAlertsAPI_List_alerts() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/sql/dashboards_usage_test.go b/service/sql/dashboards_usage_test.go index 19e3809cf..32c95a5ef 100755 --- a/service/sql/dashboards_usage_test.go +++ b/service/sql/dashboards_usage_test.go @@ -96,7 +96,7 @@ func ExampleDashboardsAPI_Get_dashboards() { } -func ExampleDashboardsAPI_ListAll_dashboards() { +func ExampleDashboardsAPI_List_dashboards() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git 
a/service/sql/data_sources_usage_test.go b/service/sql/data_sources_usage_test.go index 7a683b48e..613b063e9 100755 --- a/service/sql/data_sources_usage_test.go +++ b/service/sql/data_sources_usage_test.go @@ -9,7 +9,7 @@ import ( "github.com/databricks/databricks-sdk-go/logger" ) -func ExampleDataSourcesAPI_ListAll_queries() { +func ExampleDataSourcesAPI_List_queries() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { @@ -24,7 +24,7 @@ func ExampleDataSourcesAPI_ListAll_queries() { } -func ExampleDataSourcesAPI_ListAll_alerts() { +func ExampleDataSourcesAPI_List_alerts() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/sql/query_history_usage_test.go b/service/sql/query_history_usage_test.go index 6352821ee..5ef05920d 100755 --- a/service/sql/query_history_usage_test.go +++ b/service/sql/query_history_usage_test.go @@ -10,7 +10,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" ) -func ExampleQueryHistoryAPI_ListAll_sqlQueryHistory() { +func ExampleQueryHistoryAPI_List_sqlQueryHistory() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/sql/statement_execution_usage_test.go b/service/sql/statement_execution_usage_test.go index a88bccf68..4a857d055 100755 --- a/service/sql/statement_execution_usage_test.go +++ b/service/sql/statement_execution_usage_test.go @@ -15,7 +15,7 @@ import ( "github.com/databricks/databricks-sdk-go/service/sql" ) -func ExampleStatementExecutionAPI_Execute_tables() { +func ExampleStatementExecutionAPI_ExecuteAndWait_tables() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { @@ -67,7 +67,7 @@ func ExampleStatementExecutionAPI_Execute_tables() { } -func ExampleStatementExecutionAPI_Execute_shares() { +func ExampleStatementExecutionAPI_ExecuteAndWait_shares() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err 
!= nil { diff --git a/service/sql/warehouses_usage_test.go b/service/sql/warehouses_usage_test.go index 6eb2f60fc..d01855729 100755 --- a/service/sql/warehouses_usage_test.go +++ b/service/sql/warehouses_usage_test.go @@ -129,7 +129,7 @@ func ExampleWarehousesAPI_Get_sqlWarehouses() { } -func ExampleWarehousesAPI_ListAll_sqlWarehouses() { +func ExampleWarehousesAPI_List_sqlWarehouses() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/workspace/git_credentials_usage_test.go b/service/workspace/git_credentials_usage_test.go index 7a91ecbcf..53b6fd7c1 100755 --- a/service/workspace/git_credentials_usage_test.go +++ b/service/workspace/git_credentials_usage_test.go @@ -71,7 +71,7 @@ func ExampleGitCredentialsAPI_Get_gitCredentials() { } -func ExampleGitCredentialsAPI_ListAll_gitCredentials() { +func ExampleGitCredentialsAPI_List_gitCredentials() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/workspace/repos_usage_test.go b/service/workspace/repos_usage_test.go index 0aa2d3460..133b6e53d 100755 --- a/service/workspace/repos_usage_test.go +++ b/service/workspace/repos_usage_test.go @@ -75,7 +75,7 @@ func ExampleReposAPI_Get_repos() { } -func ExampleReposAPI_ListAll_repos() { +func ExampleReposAPI_List_repos() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { diff --git a/service/workspace/workspace_usage_test.go b/service/workspace/workspace_usage_test.go index 083fe60b5..72b6f6570 100755 --- a/service/workspace/workspace_usage_test.go +++ b/service/workspace/workspace_usage_test.go @@ -203,7 +203,7 @@ func ExampleWorkspaceAPI_Import_workspaceIntegration() { } -func ExampleWorkspaceAPI_ListAll_workspaceIntegration() { +func ExampleWorkspaceAPI_List_workspaceIntegration() { ctx := context.Background() w, err := databricks.NewWorkspaceClient() if err != nil { From 79e4b3a6e9b0b7dcb1af9ad4025deb447b01d933 Mon Sep 17 
00:00:00 2001 From: hectorcast-db Date: Thu, 17 Apr 2025 13:40:08 +0200 Subject: [PATCH 41/54] Enabled asynchronous token refreshes by default (#1208) ## What changes are proposed in this pull request? Enabled asynchronous token refreshes by default ## How is this tested? Integration tests will be run using the new default --- NEXT_CHANGELOG.md | 1 + config/experimental/auth/dataplane/dataplane.go | 2 -- 2 files changed, 1 insertion(+), 2 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index bcf084760..8c772f608 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -3,6 +3,7 @@ ## Release v0.64.0 ### New Features and Improvements +* Enabled asynchronous token refreshes by default ([#1208](https://github.com/databricks/databricks-sdk-go/pull/1208)). ### Bug Fixes diff --git a/config/experimental/auth/dataplane/dataplane.go b/config/experimental/auth/dataplane/dataplane.go index e9b943192..be8348de6 100644 --- a/config/experimental/auth/dataplane/dataplane.go +++ b/config/experimental/auth/dataplane/dataplane.go @@ -28,7 +28,6 @@ func NewEndpointTokenSource(c OAuthClient, cpts auth.TokenSource) *dataPlaneToke client: c, cpts: auth.NewCachedTokenSource( cpts, - auth.WithAsyncRefresh(false), // TODO: Enable async refreshes once the feature is stable. ), } } @@ -65,7 +64,6 @@ func (dpts *dataPlaneTokenSource) Token(ctx context.Context, endpoint string, au cpts: dpts.cpts, authDetails: authDetails, }, - auth.WithAsyncRefresh(false), // TODO: Enable async refresh once the feature is stable. ) dpts.sources.Store(key, ts) From e7f17368e3a70dddfddf8d91b628e93d0034e9bc Mon Sep 17 00:00:00 2001 From: Tanmay Rustagi <88379306+tanmay-db@users.noreply.github.com> Date: Thu, 24 Apr 2025 20:22:24 +0200 Subject: [PATCH 42/54] [Internal] Update SDK to latest OpenAPI spec (#1209) ## What changes are proposed in this pull request? Update SDK to latest OpenAPI spec ## How is this tested? 
N/A --- .codegen/_openapi_sha | 2 +- .gitattributes | 3 + NEXT_CHANGELOG.md | 17 ++ .../mock_vector_search_endpoints_interface.go | 118 ++++++++ .../mock_vector_search_indexes_interface.go | 73 ++++- service/cleanrooms/model.go | 2 +- service/compute/model.go | 44 +++ service/dashboards/api.go | 58 ++-- service/dashboards/interface.go | 24 +- service/dashboards/model.go | 6 +- service/jobs/model.go | 64 ++++- service/pkg.go | 6 +- service/settings/api.go | 36 +-- service/settings/interface.go | 36 +-- service/settings/model.go | 6 +- service/vectorsearch/api.go | 44 ++- service/vectorsearch/impl.go | 40 ++- service/vectorsearch/interface.go | 20 +- service/vectorsearch/model.go | 264 ++++++++++++------ workspace_client.go | 4 +- 20 files changed, 647 insertions(+), 220 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 26ece1bc5..8cd956362 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -05692f4dcf168be190bb7bcda725ee8b368b7ae3 \ No newline at end of file +06a18b97d7996d6cd8dd88bfdb0f2c2792739e46 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index da04156a9..3296c8f9f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -110,7 +110,10 @@ experimental/mocks/service/settings/mock_default_namespace_interface.go linguist experimental/mocks/service/settings/mock_disable_legacy_access_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_dbfs_interface.go linguist-generated=true experimental/mocks/service/settings/mock_disable_legacy_features_interface.go linguist-generated=true +experimental/mocks/service/settings/mock_enable_export_notebook_interface.go linguist-generated=true experimental/mocks/service/settings/mock_enable_ip_access_lists_interface.go linguist-generated=true +experimental/mocks/service/settings/mock_enable_notebook_table_clipboard_interface.go linguist-generated=true 
+experimental/mocks/service/settings/mock_enable_results_downloading_interface.go linguist-generated=true experimental/mocks/service/settings/mock_enhanced_security_monitoring_interface.go linguist-generated=true experimental/mocks/service/settings/mock_esm_enablement_account_interface.go linguist-generated=true experimental/mocks/service/settings/mock_ip_access_lists_interface.go linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 8c772f608..d7c78f201 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -12,3 +12,20 @@ ### Internal Changes ### API Changes +* Added `UpdateEndpointBudgetPolicy` and `UpdateEndpointCustomTags` methods for [w.VectorSearchEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI) workspace-level service. +* Added `NodeTypeFlexibility` field for [compute.EditInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EditInstancePool). +* Added `PageSize` and `PageToken` fields for [compute.GetEvents](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEvents). +* Added `NextPageToken` and `PrevPageToken` fields for [compute.GetEventsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEventsResponse). +* Added `NodeTypeFlexibility` field for [compute.GetInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetInstancePool). +* Added `NodeTypeFlexibility` field for [compute.InstancePoolAndStats](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InstancePoolAndStats). +* Added `EffectivePerformanceTarget` field for [jobs.RepairHistoryItem](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairHistoryItem). +* Added `PerformanceTarget` field for [jobs.RepairRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairRun). 
+* Added `BudgetPolicyId` field for [vectorsearch.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#CreateEndpoint). +* Added `CustomTags` and `EffectiveBudgetPolicyId` fields for [vectorsearch.EndpointInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#EndpointInfo). +* Added `Disabled` enum value for [jobs.TerminationCodeCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationCodeCode). +* [Breaking] Changed `CreateIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service to return [vectorsearch.VectorIndex](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorIndex). +* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service . HTTP method/verb has changed. +* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service with new required argument order. +* [Breaking] Changed `DataArray` field for [vectorsearch.ResultData](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ResultData) to type [vectorsearch.ListValueList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValueList). +* [Breaking] Changed waiter for [VectorSearchEndpointsAPI.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI.CreateEndpoint). +* [Breaking] Removed `NullValue` field for [vectorsearch.Value](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#Value). 
diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go index 50e397591..9cc2b4d0f 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_endpoints_interface.go @@ -481,6 +481,124 @@ func (_c *MockVectorSearchEndpointsInterface_ListEndpointsAll_Call) RunAndReturn return _c } +// UpdateEndpointBudgetPolicy provides a mock function with given fields: ctx, request +func (_m *MockVectorSearchEndpointsInterface) UpdateEndpointBudgetPolicy(ctx context.Context, request vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateEndpointBudgetPolicy") + } + + var r0 *vectorsearch.PatchEndpointBudgetPolicyResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) *vectorsearch.PatchEndpointBudgetPolicyResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.PatchEndpointBudgetPolicyResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEndpointBudgetPolicy' +type MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call struct { + *mock.Call +} + +// UpdateEndpointBudgetPolicy is a 
helper method to define mock.On call +// - ctx context.Context +// - request vectorsearch.PatchEndpointBudgetPolicyRequest +func (_e *MockVectorSearchEndpointsInterface_Expecter) UpdateEndpointBudgetPolicy(ctx interface{}, request interface{}) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + return &MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call{Call: _e.mock.On("UpdateEndpointBudgetPolicy", ctx, request)} +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call) Run(run func(ctx context.Context, request vectorsearch.PatchEndpointBudgetPolicyRequest)) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(vectorsearch.PatchEndpointBudgetPolicyRequest)) + }) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call) Return(_a0 *vectorsearch.PatchEndpointBudgetPolicyResponse, _a1 error) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call) RunAndReturn(run func(context.Context, vectorsearch.PatchEndpointBudgetPolicyRequest) (*vectorsearch.PatchEndpointBudgetPolicyResponse, error)) *MockVectorSearchEndpointsInterface_UpdateEndpointBudgetPolicy_Call { + _c.Call.Return(run) + return _c +} + +// UpdateEndpointCustomTags provides a mock function with given fields: ctx, request +func (_m *MockVectorSearchEndpointsInterface) UpdateEndpointCustomTags(ctx context.Context, request vectorsearch.UpdateEndpointCustomTagsRequest) (*vectorsearch.UpdateEndpointCustomTagsResponse, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateEndpointCustomTags") + } + + var r0 *vectorsearch.UpdateEndpointCustomTagsResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, 
vectorsearch.UpdateEndpointCustomTagsRequest) (*vectorsearch.UpdateEndpointCustomTagsResponse, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.UpdateEndpointCustomTagsRequest) *vectorsearch.UpdateEndpointCustomTagsResponse); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.UpdateEndpointCustomTagsResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, vectorsearch.UpdateEndpointCustomTagsRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateEndpointCustomTags' +type MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call struct { + *mock.Call +} + +// UpdateEndpointCustomTags is a helper method to define mock.On call +// - ctx context.Context +// - request vectorsearch.UpdateEndpointCustomTagsRequest +func (_e *MockVectorSearchEndpointsInterface_Expecter) UpdateEndpointCustomTags(ctx interface{}, request interface{}) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + return &MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call{Call: _e.mock.On("UpdateEndpointCustomTags", ctx, request)} +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call) Run(run func(ctx context.Context, request vectorsearch.UpdateEndpointCustomTagsRequest)) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(vectorsearch.UpdateEndpointCustomTagsRequest)) + }) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call) Return(_a0 *vectorsearch.UpdateEndpointCustomTagsResponse, _a1 error) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + 
_c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call) RunAndReturn(run func(context.Context, vectorsearch.UpdateEndpointCustomTagsRequest) (*vectorsearch.UpdateEndpointCustomTagsResponse, error)) *MockVectorSearchEndpointsInterface_UpdateEndpointCustomTags_Call { + _c.Call.Return(run) + return _c +} + // WaitGetEndpointVectorSearchEndpointOnline provides a mock function with given fields: ctx, endpointName, timeout, callback func (_m *MockVectorSearchEndpointsInterface) WaitGetEndpointVectorSearchEndpointOnline(ctx context.Context, endpointName string, timeout time.Duration, callback func(*vectorsearch.EndpointInfo)) (*vectorsearch.EndpointInfo, error) { ret := _m.Called(ctx, endpointName, timeout, callback) diff --git a/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go b/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go index 8fd95e648..b396a28a5 100644 --- a/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go +++ b/experimental/mocks/service/vectorsearch/mock_vector_search_indexes_interface.go @@ -25,23 +25,23 @@ func (_m *MockVectorSearchIndexesInterface) EXPECT() *MockVectorSearchIndexesInt } // CreateIndex provides a mock function with given fields: ctx, request -func (_m *MockVectorSearchIndexesInterface) CreateIndex(ctx context.Context, request vectorsearch.CreateVectorIndexRequest) (*vectorsearch.CreateVectorIndexResponse, error) { +func (_m *MockVectorSearchIndexesInterface) CreateIndex(ctx context.Context, request vectorsearch.CreateVectorIndexRequest) (*vectorsearch.VectorIndex, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for CreateIndex") } - var r0 *vectorsearch.CreateVectorIndexResponse + var r0 *vectorsearch.VectorIndex var r1 error - if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) 
(*vectorsearch.CreateVectorIndexResponse, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) (*vectorsearch.VectorIndex, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) *vectorsearch.CreateVectorIndexResponse); ok { + if rf, ok := ret.Get(0).(func(context.Context, vectorsearch.CreateVectorIndexRequest) *vectorsearch.VectorIndex); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*vectorsearch.CreateVectorIndexResponse) + r0 = ret.Get(0).(*vectorsearch.VectorIndex) } } @@ -73,12 +73,12 @@ func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) Run(run func(ctx co return _c } -func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) Return(_a0 *vectorsearch.CreateVectorIndexResponse, _a1 error) *MockVectorSearchIndexesInterface_CreateIndex_Call { +func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) Return(_a0 *vectorsearch.VectorIndex, _a1 error) *MockVectorSearchIndexesInterface_CreateIndex_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) RunAndReturn(run func(context.Context, vectorsearch.CreateVectorIndexRequest) (*vectorsearch.CreateVectorIndexResponse, error)) *MockVectorSearchIndexesInterface_CreateIndex_Call { +func (_c *MockVectorSearchIndexesInterface_CreateIndex_Call) RunAndReturn(run func(context.Context, vectorsearch.CreateVectorIndexRequest) (*vectorsearch.VectorIndex, error)) *MockVectorSearchIndexesInterface_CreateIndex_Call { _c.Call.Return(run) return _c } @@ -142,6 +142,65 @@ func (_c *MockVectorSearchIndexesInterface_DeleteDataVectorIndex_Call) RunAndRet return _c } +// DeleteDataVectorIndexByIndexName provides a mock function with given fields: ctx, indexName +func (_m *MockVectorSearchIndexesInterface) DeleteDataVectorIndexByIndexName(ctx context.Context, indexName string) 
(*vectorsearch.DeleteDataVectorIndexResponse, error) { + ret := _m.Called(ctx, indexName) + + if len(ret) == 0 { + panic("no return value specified for DeleteDataVectorIndexByIndexName") + } + + var r0 *vectorsearch.DeleteDataVectorIndexResponse + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*vectorsearch.DeleteDataVectorIndexResponse, error)); ok { + return rf(ctx, indexName) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *vectorsearch.DeleteDataVectorIndexResponse); ok { + r0 = rf(ctx, indexName) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*vectorsearch.DeleteDataVectorIndexResponse) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, indexName) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'DeleteDataVectorIndexByIndexName' +type MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call struct { + *mock.Call +} + +// DeleteDataVectorIndexByIndexName is a helper method to define mock.On call +// - ctx context.Context +// - indexName string +func (_e *MockVectorSearchIndexesInterface_Expecter) DeleteDataVectorIndexByIndexName(ctx interface{}, indexName interface{}) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + return &MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call{Call: _e.mock.On("DeleteDataVectorIndexByIndexName", ctx, indexName)} +} + +func (_c *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call) Run(run func(ctx context.Context, indexName string)) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c 
*MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call) Return(_a0 *vectorsearch.DeleteDataVectorIndexResponse, _a1 error) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call) RunAndReturn(run func(context.Context, string) (*vectorsearch.DeleteDataVectorIndexResponse, error)) *MockVectorSearchIndexesInterface_DeleteDataVectorIndexByIndexName_Call { + _c.Call.Return(run) + return _c +} + // DeleteIndex provides a mock function with given fields: ctx, request func (_m *MockVectorSearchIndexesInterface) DeleteIndex(ctx context.Context, request vectorsearch.DeleteIndexRequest) error { ret := _m.Called(ctx, request) diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index ac92cb087..7b7a441a0 100755 --- a/service/cleanrooms/model.go +++ b/service/cleanrooms/model.go @@ -191,7 +191,7 @@ func (s CleanRoomAssetForeignTableLocalDetails) MarshalJSON() ([]byte, error) { } type CleanRoomAssetNotebook struct { - // Server generated checksum that represents the notebook version. + // Server generated etag that represents the notebook version. Etag string `json:"etag,omitempty"` // Base 64 representation of the notebook contents. This is the same format // as returned by :method:workspace/export with the format of **HTML**. diff --git a/service/compute/model.go b/service/compute/model.go index c34ffb688..394488a1d 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2631,6 +2631,10 @@ type EditInstancePool struct { MaxCapacity int `json:"max_capacity,omitempty"` // Minimum number of idle instances to keep in the instance pool MinIdleInstances int `json:"min_idle_instances,omitempty"` + // For Fleet-pool V2, this object contains the information about the + // alternate node type ids to use when attempting to launch a cluster if the + // node type id is not available. 
+ NodeTypeFlexibility *NodeTypeFlexibility `json:"node_type_flexibility,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can // be provisioned and optimized for memory or compute intensive workloads. A @@ -3104,15 +3108,29 @@ type GetEvents struct { // An optional set of event types to filter on. If empty, all event types // are returned. EventTypes []EventType `json:"event_types,omitempty"` + // Deprecated: use page_token in combination with page_size instead. + // // The maximum number of events to include in a page of events. Defaults to // 50, and maximum allowed value is 500. Limit int64 `json:"limit,omitempty"` + // Deprecated: use page_token in combination with page_size instead. + // // The offset in the result set. Defaults to 0 (no offset). When an offset // is specified and the results are requested in descending order, the // end_time field is required. Offset int64 `json:"offset,omitempty"` // The order to list events in; either "ASC" or "DESC". Defaults to "DESC". Order GetEventsOrder `json:"order,omitempty"` + // The maximum number of events to include in a page of events. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is empty or 0, the server will decide the number + // of results to be returned. The field has to be in the range [0,500]. If + // the value is outside the range, the server enforces 0 or 500. + PageSize int `json:"page_size,omitempty"` + // Use next_page_token or prev_page_token returned from the previous request + // to list the next or previous page of events respectively. If page_token + // is empty, the first page is returned. + PageToken string `json:"page_token,omitempty"` // The start time in epoch milliseconds. If empty, returns events starting // from the beginning of time. 
StartTime int64 `json:"start_time,omitempty"` @@ -3157,9 +3175,21 @@ func (f *GetEventsOrder) Type() string { type GetEventsResponse struct { Events []ClusterEvent `json:"events,omitempty"` + // Deprecated: use next_page_token or prev_page_token instead. + // // The parameters required to retrieve the next page of events. Omitted if // there are no more events to read. NextPage *GetEvents `json:"next_page,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is "", it means no further results for the request. + NextPageToken string `json:"next_page_token,omitempty"` + // This field represents the pagination token to retrieve the previous page + // of results. If the value is "", it means no further results for the + // request. + PrevPageToken string `json:"prev_page_token,omitempty"` + // Deprecated: Returns 0 when request uses page_token. Will start returning + // zero when request uses offset/limit soon. + // // The total number of events filtered by the start_time, end_time, and // event_types. TotalCount int64 `json:"total_count,omitempty"` @@ -3236,6 +3266,10 @@ type GetInstancePool struct { MaxCapacity int `json:"max_capacity,omitempty"` // Minimum number of idle instances to keep in the instance pool MinIdleInstances int `json:"min_idle_instances,omitempty"` + // For Fleet-pool V2, this object contains the information about the + // alternate node type ids to use when attempting to launch a cluster if the + // node type id is not available. + NodeTypeFlexibility *NodeTypeFlexibility `json:"node_type_flexibility,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can // be provisioned and optimized for memory or compute intensive workloads. 
A @@ -3689,6 +3723,10 @@ type InstancePoolAndStats struct { MaxCapacity int `json:"max_capacity,omitempty"` // Minimum number of idle instances to keep in the instance pool MinIdleInstances int `json:"min_idle_instances,omitempty"` + // For Fleet-pool V2, this object contains the information about the + // alternate node type ids to use when attempting to launch a cluster if the + // node type id is not available. + NodeTypeFlexibility *NodeTypeFlexibility `json:"node_type_flexibility,omitempty"` // This field encodes, through a single value, the resources available to // each of the Spark nodes in this cluster. For example, the Spark nodes can // be provisioned and optimized for memory or compute intensive workloads. A @@ -4722,6 +4760,12 @@ func (s NodeType) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// For Fleet-V2 using classic clusters, this object contains the information +// about the alternate node type ids to use when attempting to launch a cluster. +// It can be used with both the driver and worker node types. +type NodeTypeFlexibility struct { +} + // Error message of a failed pending instances type PendingInstanceError struct { InstanceId string `json:"instance_id,omitempty"` diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 3e6b1ccbf..5e3771be4 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -48,10 +48,9 @@ type GenieInterface interface { // Generate full query result download. // - // Initiate full SQL query result download and obtain a `download_id` to track - // the download progress. This call initiates a new SQL execution to generate - // the query result. The result is stored in an external link can be retrieved - // using the [Get Download Full Query + // Initiates a new SQL execution and returns a `download_id` that you can use to + // track the progress of the download. 
The query result is stored in an external + // link and can be retrieved using the [Get Download Full Query // Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks // strongly recommends that you protect the URLs that are returned by the // `EXTERNAL_LINKS` disposition. See [Execute @@ -62,16 +61,13 @@ type GenieInterface interface { // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully - // receiving a `download_id`, use this API to Poll download progress and - // retrieve the SQL query result external link(s) upon completion. Warning: - // Databricks strongly recommends that you protect the URLs that are returned by - // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` - // disposition, a short-lived, presigned URL is generated, which can be used to - // download the results directly from Amazon S3. As a short-lived access - // credential is embedded in this presigned URL, you should protect the URL. - // Because presigned URLs are already generated with embedded temporary access - // credentials, you must not set an Authorization header in the download - // requests. See [Execute + // receiving a `download_id`, use this API to poll the download progress. When + // the download is complete, the API returns one or more external links to the + // query result files. Warning: Databricks strongly recommends that you protect + // the URLs that are returned by the `EXTERNAL_LINKS` disposition. You must not + // set an Authorization header in download requests. When using the + // `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant + // temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. 
GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) @@ -79,16 +75,13 @@ type GenieInterface interface { // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully - // receiving a `download_id`, use this API to Poll download progress and - // retrieve the SQL query result external link(s) upon completion. Warning: - // Databricks strongly recommends that you protect the URLs that are returned by - // the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` - // disposition, a short-lived, presigned URL is generated, which can be used to - // download the results directly from Amazon S3. As a short-lived access - // credential is embedded in this presigned URL, you should protect the URL. - // Because presigned URLs are already generated with embedded temporary access - // credentials, you must not set an Authorization header in the download - // requests. See [Execute + // receiving a `download_id`, use this API to poll the download progress. When + // the download is complete, the API returns one or more external links to the + // query result files. Warning: Databricks strongly recommends that you protect + // the URLs that are returned by the `EXTERNAL_LINKS` disposition. You must not + // set an Authorization header in download requests. When using the + // `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant + // temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. 
GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) @@ -297,16 +290,13 @@ func (a *GenieAPI) CreateMessageAndWait(ctx context.Context, genieCreateConversa // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully -// receiving a `download_id`, use this API to Poll download progress and -// retrieve the SQL query result external link(s) upon completion. Warning: -// Databricks strongly recommends that you protect the URLs that are returned by -// the `EXTERNAL_LINKS` disposition. When you use the `EXTERNAL_LINKS` -// disposition, a short-lived, presigned URL is generated, which can be used to -// download the results directly from Amazon S3. As a short-lived access -// credential is embedded in this presigned URL, you should protect the URL. -// Because presigned URLs are already generated with embedded temporary access -// credentials, you must not set an Authorization header in the download -// requests. See [Execute +// receiving a `download_id`, use this API to poll the download progress. When +// the download is complete, the API returns one or more external links to the +// query result files. Warning: Databricks strongly recommends that you protect +// the URLs that are returned by the `EXTERNAL_LINKS` disposition. You must not +// set an Authorization header in download requests. When using the +// `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant +// temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. 
func (a *GenieAPI) GetDownloadFullQueryResultBySpaceIdAndConversationIdAndMessageIdAndAttachmentIdAndDownloadId(ctx context.Context, spaceId string, conversationId string, messageId string, attachmentId string, downloadId string) (*GenieGetDownloadFullQueryResultResponse, error) { return a.genieImpl.GetDownloadFullQueryResult(ctx, GenieGetDownloadFullQueryResultRequest{ diff --git a/service/dashboards/interface.go b/service/dashboards/interface.go index 0df971d5f..ffc8bdc7a 100755 --- a/service/dashboards/interface.go +++ b/service/dashboards/interface.go @@ -33,10 +33,9 @@ type GenieService interface { // Generate full query result download. // - // Initiate full SQL query result download and obtain a `download_id` to - // track the download progress. This call initiates a new SQL execution to - // generate the query result. The result is stored in an external link can - // be retrieved using the [Get Download Full Query + // Initiates a new SQL execution and returns a `download_id` that you can + // use to track the progress of the download. The query result is stored in + // an external link and can be retrieved using the [Get Download Full Query // Result](:method:genie/getdownloadfullqueryresult) API. Warning: // Databricks strongly recommends that you protect the URLs that are // returned by the `EXTERNAL_LINKS` disposition. See [Execute @@ -47,16 +46,13 @@ type GenieService interface { // // After [Generating a Full Query Result // Download](:method:genie/getdownloadfullqueryresult) and successfully - // receiving a `download_id`, use this API to Poll download progress and - // retrieve the SQL query result external link(s) upon completion. Warning: - // Databricks strongly recommends that you protect the URLs that are - // returned by the `EXTERNAL_LINKS` disposition. When you use the - // `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, - // which can be used to download the results directly from Amazon S3. 
As a - // short-lived access credential is embedded in this presigned URL, you - // should protect the URL. Because presigned URLs are already generated with - // embedded temporary access credentials, you must not set an Authorization - // header in the download requests. See [Execute + // receiving a `download_id`, use this API to poll the download progress. + // When the download is complete, the API returns one or more external links + // to the query result files. Warning: Databricks strongly recommends that + // you protect the URLs that are returned by the `EXTERNAL_LINKS` + // disposition. You must not set an Authorization header in download + // requests. When using the `EXTERNAL_LINKS` disposition, Databricks returns + // presigned URLs that grant temporary access to data. See [Execute // Statement](:method:statementexecution/executestatement) for more details. GetDownloadFullQueryResult(ctx context.Context, request GenieGetDownloadFullQueryResultRequest) (*GenieGetDownloadFullQueryResultResponse, error) diff --git a/service/dashboards/model.go b/service/dashboards/model.go index b9b9b168a..a3f936531 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -354,7 +354,7 @@ type GenieGenerateDownloadFullQueryResultRequest struct { ConversationId string `json:"-" url:"-"` // Message ID MessageId string `json:"-" url:"-"` - // Space ID + // Genie space ID SpaceId string `json:"-" url:"-"` } @@ -397,7 +397,7 @@ type GenieGetDownloadFullQueryResultRequest struct { DownloadId string `json:"-" url:"-"` // Message ID MessageId string `json:"-" url:"-"` - // Space ID + // Genie space ID SpaceId string `json:"-" url:"-"` } @@ -557,7 +557,7 @@ func (s GenieResultMetadata) MarshalJSON() ([]byte, error) { type GenieSpace struct { // Description of the Genie Space Description string `json:"description,omitempty"` - // Space ID + // Genie space ID SpaceId string `json:"space_id"` // Title of the Genie Space Title string `json:"title"` diff --git 
a/service/jobs/model.go b/service/jobs/model.go index eb7628933..e3e23e7a8 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -361,10 +361,12 @@ func (f *CleanRoomTaskRunResultState) Type() string { // Stores the run state of the clean rooms notebook task. type CleanRoomTaskRunState struct { // A value indicating the run's current lifecycle state. This field is - // always available in the response. + // always available in the response. Note: Additional states might be + // introduced in future releases. LifeCycleState CleanRoomTaskRunLifeCycleState `json:"life_cycle_state,omitempty"` // A value indicating the run's result. This field is only available for - // terminal lifecycle states. + // terminal lifecycle states. Note: Additional states might be introduced in + // future releases. ResultState CleanRoomTaskRunResultState `json:"result_state,omitempty"` } @@ -753,10 +755,13 @@ func (s DashboardPageSnapshot) MarshalJSON() ([]byte, error) { // Configures the Lakeview Dashboard job task type. type DashboardTask struct { + // The identifier of the dashboard to refresh. DashboardId string `json:"dashboard_id,omitempty"` - + // Optional: subscription configuration for sending the dashboard snapshot. Subscription *Subscription `json:"subscription,omitempty"` - // The warehouse id to execute the dashboard with for the schedule + // Optional: The warehouse id to execute the dashboard with for the + // schedule. If not specified, the default warehouse of the dashboard will + // be used. WarehouseId string `json:"warehouse_id,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -2578,6 +2583,15 @@ type QueueSettings struct { } type RepairHistoryItem struct { + // The actual performance target used by the serverless run during + // execution. This can differ from the client-set performance target on the + // request depending on whether the performance mode is supported by the job + // type. 
+ // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. + EffectivePerformanceTarget PerformanceTarget `json:"effective_performance_target,omitempty"` // The end time of the (repaired) run. EndTime int64 `json:"end_time,omitempty"` // The ID of the repair. Only returned for the items that represent a repair @@ -2681,6 +2695,15 @@ type RepairRun struct { // [Task parameter variables]: https://docs.databricks.com/jobs.html#parameter-variables // [dbutils.widgets.get]: https://docs.databricks.com/dev-tools/databricks-utils.html NotebookParams map[string]string `json:"notebook_params,omitempty"` + // The performance mode on a serverless job. The performance target + // determines the level of compute performance or cost-efficiency for the + // run. This field overrides the performance target defined on the job + // level. + // + // * `STANDARD`: Enables cost-efficient execution of serverless workloads. * + // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times + // through rapid scaling and optimized cluster performance. + PerformanceTarget PerformanceTarget `json:"performance_target,omitempty"` // Controls whether the pipeline should perform a full refresh PipelineParams *PipelineParams `json:"pipeline_params,omitempty"` @@ -3691,12 +3714,14 @@ func (f *RunResultState) Type() string { // The current state of the run. type RunState struct { // A value indicating the run's current lifecycle state. This field is - // always available in the response. + // always available in the response. Note: Additional states might be + // introduced in future releases. LifeCycleState RunLifeCycleState `json:"life_cycle_state,omitempty"` // The reason indicating why the run was queued. QueueReason string `json:"queue_reason,omitempty"` // A value indicating the run's result. 
This field is only available for - // terminal lifecycle states. + // terminal lifecycle states. Note: Additional states might be introduced in + // future releases. ResultState RunResultState `json:"result_state,omitempty"` // A descriptive message for the current state. This field is unstructured, // and its exact format is subject to change. @@ -3757,7 +3782,7 @@ type RunTask struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *RunConditionTask `json:"condition_task,omitempty"` - // The task runs a DashboardTask when the `dashboard_task` field is present. + // The task refreshes a dashboard and sends a snapshot to subscribers. DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use @@ -4504,7 +4529,7 @@ type SubmitTask struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` - // The task runs a DashboardTask when the `dashboard_task` field is present. + // The task refreshes a dashboard and sends a snapshot to subscribers. DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use @@ -4621,7 +4646,7 @@ type Subscription struct { CustomSubject string `json:"custom_subject,omitempty"` // When true, the subscription will not send emails. Paused bool `json:"paused,omitempty"` - + // The list of subscribers to send the snapshot of the dashboard to. 
Subscribers []SubscriptionSubscriber `json:"subscribers,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -4636,8 +4661,11 @@ func (s Subscription) MarshalJSON() ([]byte, error) { } type SubscriptionSubscriber struct { + // A snapshot of the dashboard will be sent to the destination when the + // `destination_id` field is present. DestinationId string `json:"destination_id,omitempty"` - + // A snapshot of the dashboard will be sent to the user's email when the + // `user_name` field is present. UserName string `json:"user_name,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -4689,7 +4717,7 @@ type Task struct { // task does not require a cluster to execute and does not support retries // or notifications. ConditionTask *ConditionTask `json:"condition_task,omitempty"` - // The task runs a DashboardTask when the `dashboard_task` field is present. + // The task refreshes a dashboard and sends a snapshot to subscribers. DashboardTask *DashboardTask `json:"dashboard_task,omitempty"` // The task runs one or more dbt commands when the `dbt_task` field is // present. The dbt task requires both Databricks SQL and the ability to use @@ -4951,7 +4979,8 @@ func (s TaskNotificationSettings) MarshalJSON() ([]byte, error) { // details. * `CLOUD_FAILURE`: The run failed due to a cloud provider issue. // Refer to the state message for further details. * // `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job -// level queue size limit. +// level queue size limit. * `DISABLED`: The run was never executed because it +// was disabled explicitly by the user. // // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now type TerminationCodeCode string @@ -4975,6 +5004,9 @@ const TerminationCodeCodeClusterError TerminationCodeCode = `CLUSTER_ERROR` // frame. 
const TerminationCodeCodeClusterRequestLimitExceeded TerminationCodeCode = `CLUSTER_REQUEST_LIMIT_EXCEEDED` +// The run was never executed because it was disabled explicitly by the user. +const TerminationCodeCodeDisabled TerminationCodeCode = `DISABLED` + // The run encountered an error while communicating with the Spark Driver. const TerminationCodeCodeDriverError TerminationCodeCode = `DRIVER_ERROR` @@ -5055,11 +5087,11 @@ func (f *TerminationCodeCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationCodeCode) Set(v string) error { switch v { - case `BUDGET_POLICY_LIMIT_EXCEEDED`, `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `USER_CANCELED`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: + case `BUDGET_POLICY_LIMIT_EXCEEDED`, `CANCELED`, `CLOUD_FAILURE`, `CLUSTER_ERROR`, `CLUSTER_REQUEST_LIMIT_EXCEEDED`, `DISABLED`, `DRIVER_ERROR`, `FEATURE_DISABLED`, `INTERNAL_ERROR`, `INVALID_CLUSTER_REQUEST`, `INVALID_RUN_CONFIGURATION`, `LIBRARY_INSTALLATION_ERROR`, `MAX_CONCURRENT_RUNS_EXCEEDED`, `MAX_JOB_QUEUE_SIZE_EXCEEDED`, `MAX_SPARK_CONTEXTS_EXCEEDED`, `REPOSITORY_CHECKOUT_FAILED`, `RESOURCE_NOT_FOUND`, `RUN_EXECUTION_ERROR`, `SKIPPED`, `STORAGE_ACCESS_ERROR`, `SUCCESS`, `UNAUTHORIZED_ERROR`, `USER_CANCELED`, `WORKSPACE_RUN_LIMIT_EXCEEDED`: *f = TerminationCodeCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "BUDGET_POLICY_LIMIT_EXCEEDED", "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", "CLUSTER_REQUEST_LIMIT_EXCEEDED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", 
"INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "USER_CANCELED", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) + return fmt.Errorf(`value "%s" is not one of "BUDGET_POLICY_LIMIT_EXCEEDED", "CANCELED", "CLOUD_FAILURE", "CLUSTER_ERROR", "CLUSTER_REQUEST_LIMIT_EXCEEDED", "DISABLED", "DRIVER_ERROR", "FEATURE_DISABLED", "INTERNAL_ERROR", "INVALID_CLUSTER_REQUEST", "INVALID_RUN_CONFIGURATION", "LIBRARY_INSTALLATION_ERROR", "MAX_CONCURRENT_RUNS_EXCEEDED", "MAX_JOB_QUEUE_SIZE_EXCEEDED", "MAX_SPARK_CONTEXTS_EXCEEDED", "REPOSITORY_CHECKOUT_FAILED", "RESOURCE_NOT_FOUND", "RUN_EXECUTION_ERROR", "SKIPPED", "STORAGE_ACCESS_ERROR", "SUCCESS", "UNAUTHORIZED_ERROR", "USER_CANCELED", "WORKSPACE_RUN_LIMIT_EXCEEDED"`, v) } } @@ -5111,7 +5143,9 @@ type TerminationDetails struct { // configuration. Refer to the state message for further details. * // `CLOUD_FAILURE`: The run failed due to a cloud provider issue. Refer to // the state message for further details. * `MAX_JOB_QUEUE_SIZE_EXCEEDED`: - // The run was skipped due to reaching the job level queue size limit. + // The run was skipped due to reaching the job level queue size limit. * + // `DISABLED`: The run was never executed because it was disabled explicitly + // by the user. // // [Link]: https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now Code TerminationCodeCode `json:"code,omitempty"` diff --git a/service/pkg.go b/service/pkg.go index 95dbd2fba..aaefe6bdf 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -52,10 +52,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. 
-// // - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. // +// - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. // // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. @@ -82,7 +82,7 @@ // // - [settings.DisableLegacyFeaturesAPI]: Disable legacy features for new Databricks workspaces. // -// - [settings.EnableExportNotebookAPI]: Controls whether users can export notebooks and files from the Workspace. +// - [settings.EnableExportNotebookAPI]: Controls whether users can export notebooks and files from the Workspace UI. // // - [settings.EnableIpAccessListsAPI]: Controls the enforcement of IP access lists for accessing the account console. // diff --git a/service/settings/api.go b/service/settings/api.go index efbd46945..cb9a7f1ff 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -706,14 +706,14 @@ type DisableLegacyFeaturesAPI struct { type EnableExportNotebookInterface interface { - // Get the Enable Export Notebook setting. + // Get the Notebook and File exporting setting. // - // Gets the Enable Export Notebook setting. + // Gets the Notebook and File exporting setting. GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) - // Update the Enable Export Notebook setting. + // Update the Notebook and File exporting setting. // - // Updates the Enable Export Notebook setting. The model follows eventual + // Updates the Notebook and File exporting setting. The model follows eventual // consistency, which means the get after the update operation might receive // stale values for some time. 
PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) @@ -727,8 +727,8 @@ func NewEnableExportNotebook(client *client.DatabricksClient) *EnableExportNoteb } } -// Controls whether users can export notebooks and files from the Workspace. By -// default, this setting is enabled. +// Controls whether users can export notebooks and files from the Workspace UI. +// By default, this setting is enabled. type EnableExportNotebookAPI struct { enableExportNotebookImpl } @@ -768,14 +768,14 @@ type EnableIpAccessListsAPI struct { type EnableNotebookTableClipboardInterface interface { - // Get the Enable Notebook Table Clipboard setting. + // Get the Results Table Clipboard features setting. // - // Gets the Enable Notebook Table Clipboard setting. + // Gets the Results Table Clipboard features setting. GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) - // Update the Enable Notebook Table Clipboard setting. + // Update the Results Table Clipboard features setting. // - // Updates the Enable Notebook Table Clipboard setting. The model follows + // Updates the Results Table Clipboard features setting. The model follows // eventual consistency, which means the get after the update operation might // receive stale values for some time. PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) @@ -797,14 +797,14 @@ type EnableNotebookTableClipboardAPI struct { type EnableResultsDownloadingInterface interface { - // Get the Enable Results Downloading setting. + // Get the Notebook results download setting. // - // Gets the Enable Results Downloading setting. + // Gets the Notebook results download setting. GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) - // Update the Enable Results Downloading setting. + // Update the Notebook results download setting. 
// - // Updates the Enable Results Downloading setting. The model follows eventual + // Updates the Notebook results download setting. The model follows eventual // consistency, which means the get after the update operation might receive // stale values for some time. PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) @@ -1500,8 +1500,8 @@ type SettingsInterface interface { // all DBFS functionality is enabled DisableLegacyDbfs() DisableLegacyDbfsInterface - // Controls whether users can export notebooks and files from the Workspace. - // By default, this setting is enabled. + // Controls whether users can export notebooks and files from the Workspace + // UI. By default, this setting is enabled. EnableExportNotebook() EnableExportNotebookInterface // Controls whether users can copy tabular data to the clipboard via the UI. @@ -1623,8 +1623,8 @@ type SettingsAPI struct { // all DBFS functionality is enabled disableLegacyDbfs DisableLegacyDbfsInterface - // Controls whether users can export notebooks and files from the Workspace. - // By default, this setting is enabled. + // Controls whether users can export notebooks and files from the Workspace + // UI. By default, this setting is enabled. enableExportNotebook EnableExportNotebookInterface // Controls whether users can copy tabular data to the clipboard via the UI. diff --git a/service/settings/interface.go b/service/settings/interface.go index f5faae367..8042e91ae 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -343,20 +343,20 @@ type DisableLegacyFeaturesService interface { Update(ctx context.Context, request UpdateDisableLegacyFeaturesRequest) (*DisableLegacyFeatures, error) } -// Controls whether users can export notebooks and files from the Workspace. By -// default, this setting is enabled. +// Controls whether users can export notebooks and files from the Workspace UI. 
+// By default, this setting is enabled. type EnableExportNotebookService interface { - // Get the Enable Export Notebook setting. + // Get the Notebook and File exporting setting. // - // Gets the Enable Export Notebook setting. + // Gets the Notebook and File exporting setting. GetEnableExportNotebook(ctx context.Context) (*EnableExportNotebook, error) - // Update the Enable Export Notebook setting. + // Update the Notebook and File exporting setting. // - // Updates the Enable Export Notebook setting. The model follows eventual - // consistency, which means the get after the update operation might receive - // stale values for some time. + // Updates the Notebook and File exporting setting. The model follows + // eventual consistency, which means the get after the update operation + // might receive stale values for some time. PatchEnableExportNotebook(ctx context.Context, request UpdateEnableExportNotebookRequest) (*EnableExportNotebook, error) } @@ -385,14 +385,14 @@ type EnableIpAccessListsService interface { // default, this setting is enabled. type EnableNotebookTableClipboardService interface { - // Get the Enable Notebook Table Clipboard setting. + // Get the Results Table Clipboard features setting. // - // Gets the Enable Notebook Table Clipboard setting. + // Gets the Results Table Clipboard features setting. GetEnableNotebookTableClipboard(ctx context.Context) (*EnableNotebookTableClipboard, error) - // Update the Enable Notebook Table Clipboard setting. + // Update the Results Table Clipboard features setting. // - // Updates the Enable Notebook Table Clipboard setting. The model follows + // Updates the Results Table Clipboard features setting. The model follows // eventual consistency, which means the get after the update operation // might receive stale values for some time. 
PatchEnableNotebookTableClipboard(ctx context.Context, request UpdateEnableNotebookTableClipboardRequest) (*EnableNotebookTableClipboard, error) @@ -402,16 +402,16 @@ type EnableNotebookTableClipboardService interface { // setting is enabled. type EnableResultsDownloadingService interface { - // Get the Enable Results Downloading setting. + // Get the Notebook results download setting. // - // Gets the Enable Results Downloading setting. + // Gets the Notebook results download setting. GetEnableResultsDownloading(ctx context.Context) (*EnableResultsDownloading, error) - // Update the Enable Results Downloading setting. + // Update the Notebook results download setting. // - // Updates the Enable Results Downloading setting. The model follows - // eventual consistency, which means the get after the update operation - // might receive stale values for some time. + // Updates the Notebook results download setting. The model follows eventual + // consistency, which means the get after the update operation might receive + // stale values for some time. 
PatchEnableResultsDownloading(ctx context.Context, request UpdateEnableResultsDownloadingRequest) (*EnableResultsDownloading, error) } diff --git a/service/settings/model.go b/service/settings/model.go index 39daaf756..8775aaa34 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -396,6 +396,8 @@ const ComplianceStandardIsmap ComplianceStandard = `ISMAP` const ComplianceStandardItarEar ComplianceStandard = `ITAR_EAR` +const ComplianceStandardKFsi ComplianceStandard = `K_FSI` + const ComplianceStandardNone ComplianceStandard = `NONE` const ComplianceStandardPciDss ComplianceStandard = `PCI_DSS` @@ -408,11 +410,11 @@ func (f *ComplianceStandard) String() string { // Set raw string value and validate it against allowed values func (f *ComplianceStandard) Set(v string) error { switch v { - case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `HITRUST`, `IRAP_PROTECTED`, `ISMAP`, `ITAR_EAR`, `NONE`, `PCI_DSS`: + case `CANADA_PROTECTED_B`, `CYBER_ESSENTIAL_PLUS`, `FEDRAMP_HIGH`, `FEDRAMP_IL5`, `FEDRAMP_MODERATE`, `HIPAA`, `HITRUST`, `IRAP_PROTECTED`, `ISMAP`, `ITAR_EAR`, `K_FSI`, `NONE`, `PCI_DSS`: *f = ComplianceStandard(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "HITRUST", "IRAP_PROTECTED", "ISMAP", "ITAR_EAR", "NONE", "PCI_DSS"`, v) + return fmt.Errorf(`value "%s" is not one of "CANADA_PROTECTED_B", "CYBER_ESSENTIAL_PLUS", "FEDRAMP_HIGH", "FEDRAMP_IL5", "FEDRAMP_MODERATE", "HIPAA", "HITRUST", "IRAP_PROTECTED", "ISMAP", "ITAR_EAR", "K_FSI", "NONE", "PCI_DSS"`, v) } } diff --git a/service/vectorsearch/api.go b/service/vectorsearch/api.go index c0e3af2fb..ee6304132 100755 --- a/service/vectorsearch/api.go +++ b/service/vectorsearch/api.go @@ -34,26 +34,46 @@ type VectorSearchEndpointsInterface interface { CreateEndpointAndWait(ctx context.Context, createEndpoint 
CreateEndpoint, options ...retries.Option[EndpointInfo]) (*EndpointInfo, error) // Delete an endpoint. + // + // Delete a vector search endpoint. DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error // Delete an endpoint. + // + // Delete a vector search endpoint. DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error // Get an endpoint. + // + // Get details for a single vector search endpoint. GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) // Get an endpoint. + // + // Get details for a single vector search endpoint. GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) // List all endpoints. // + // List all vector search endpoints in the workspace. + // // This method is generated by Databricks SDK Code Generator. ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] // List all endpoints. // + // List all vector search endpoints in the workspace. + // // This method is generated by Databricks SDK Code Generator. ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) + + // Update the budget policy of an endpoint. + // + // Update the budget policy of an endpoint + UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) + + // Update the custom tags of an endpoint. + UpdateEndpointCustomTags(ctx context.Context, request UpdateEndpointCustomTagsRequest) (*UpdateEndpointCustomTagsResponse, error) } func NewVectorSearchEndpoints(client *client.DatabricksClient) *VectorSearchEndpointsAPI { @@ -173,6 +193,8 @@ func (a *VectorSearchEndpointsAPI) CreateEndpointAndWait(ctx context.Context, cr } // Delete an endpoint. +// +// Delete a vector search endpoint. 
func (a *VectorSearchEndpointsAPI) DeleteEndpointByEndpointName(ctx context.Context, endpointName string) error { return a.vectorSearchEndpointsImpl.DeleteEndpoint(ctx, DeleteEndpointRequest{ EndpointName: endpointName, @@ -180,6 +202,8 @@ func (a *VectorSearchEndpointsAPI) DeleteEndpointByEndpointName(ctx context.Cont } // Get an endpoint. +// +// Get details for a single vector search endpoint. func (a *VectorSearchEndpointsAPI) GetEndpointByEndpointName(ctx context.Context, endpointName string) (*EndpointInfo, error) { return a.vectorSearchEndpointsImpl.GetEndpoint(ctx, GetEndpointRequest{ EndpointName: endpointName, @@ -191,13 +215,18 @@ type VectorSearchIndexesInterface interface { // Create an index. // // Create a new index. - CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) + CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*VectorIndex, error) // Delete data from index. // // Handles the deletion of data from a specified vector index. DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) + // Delete data from index. + // + // Handles the deletion of data from a specified vector index. + DeleteDataVectorIndexByIndexName(ctx context.Context, indexName string) (*DeleteDataVectorIndexResponse, error) + // Delete an index. // // Delete an index. @@ -272,16 +301,25 @@ func NewVectorSearchIndexes(client *client.DatabricksClient) *VectorSearchIndexe // supports real-time and efficient approximate nearest neighbor (ANN) search // queries. // -// There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index +// There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index // that automatically syncs with a source Delta Table, automatically and // incrementally updating the index as the underlying data in the Delta Table -// changes. 
* **Direct Vector Access Index**: An index that supports direct read +// changes. - **Direct Vector Access Index**: An index that supports direct read // and write of vectors and metadata through our REST and SDK APIs. With this // model, the user manages index updates. type VectorSearchIndexesAPI struct { vectorSearchIndexesImpl } +// Delete data from index. +// +// Handles the deletion of data from a specified vector index. +func (a *VectorSearchIndexesAPI) DeleteDataVectorIndexByIndexName(ctx context.Context, indexName string) (*DeleteDataVectorIndexResponse, error) { + return a.vectorSearchIndexesImpl.DeleteDataVectorIndex(ctx, DeleteDataVectorIndexRequest{ + IndexName: indexName, + }) +} + // Delete an index. // // Delete an index. diff --git a/service/vectorsearch/impl.go b/service/vectorsearch/impl.go index 1deb7eea3..a11abb569 100755 --- a/service/vectorsearch/impl.go +++ b/service/vectorsearch/impl.go @@ -33,6 +33,7 @@ func (a *vectorSearchEndpointsImpl) DeleteEndpoint(ctx context.Context, request path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v", request.EndpointName) queryParams := make(map[string]any) headers := make(map[string]string) + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteEndpointResponse) return err } @@ -48,6 +49,8 @@ func (a *vectorSearchEndpointsImpl) GetEndpoint(ctx context.Context, request Get } // List all endpoints. +// +// List all vector search endpoints in the workspace. func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request ListEndpointsRequest) listing.Iterator[EndpointInfo] { getNextPage := func(ctx context.Context, req ListEndpointsRequest) (*ListEndpointResponse, error) { @@ -73,6 +76,8 @@ func (a *vectorSearchEndpointsImpl) ListEndpoints(ctx context.Context, request L } // List all endpoints. +// +// List all vector search endpoints in the workspace. 
func (a *vectorSearchEndpointsImpl) ListEndpointsAll(ctx context.Context, request ListEndpointsRequest) ([]EndpointInfo, error) { iterator := a.ListEndpoints(ctx, request) return listing.ToSlice[EndpointInfo](ctx, iterator) @@ -88,20 +93,42 @@ func (a *vectorSearchEndpointsImpl) internalListEndpoints(ctx context.Context, r return &listEndpointResponse, err } +func (a *vectorSearchEndpointsImpl) UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) { + var patchEndpointBudgetPolicyResponse PatchEndpointBudgetPolicyResponse + path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/budget-policy", request.EndpointName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &patchEndpointBudgetPolicyResponse) + return &patchEndpointBudgetPolicyResponse, err +} + +func (a *vectorSearchEndpointsImpl) UpdateEndpointCustomTags(ctx context.Context, request UpdateEndpointCustomTagsRequest) (*UpdateEndpointCustomTagsResponse, error) { + var updateEndpointCustomTagsResponse UpdateEndpointCustomTagsResponse + path := fmt.Sprintf("/api/2.0/vector-search/endpoints/%v/tags", request.EndpointName) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &updateEndpointCustomTagsResponse) + return &updateEndpointCustomTagsResponse, err +} + // unexported type that holds implementations of just VectorSearchIndexes API methods type vectorSearchIndexesImpl struct { client *client.DatabricksClient } -func (a *vectorSearchIndexesImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) { - 
var createVectorIndexResponse CreateVectorIndexResponse +func (a *vectorSearchIndexesImpl) CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*VectorIndex, error) { + var vectorIndex VectorIndex path := "/api/2.0/vector-search/indexes" queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &createVectorIndexResponse) - return &createVectorIndexResponse, err + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &vectorIndex) + return &vectorIndex, err } func (a *vectorSearchIndexesImpl) DeleteDataVectorIndex(ctx context.Context, request DeleteDataVectorIndexRequest) (*DeleteDataVectorIndexResponse, error) { @@ -110,8 +137,7 @@ func (a *vectorSearchIndexesImpl) DeleteDataVectorIndex(ctx context.Context, req queryParams := make(map[string]any) headers := make(map[string]string) headers["Accept"] = "application/json" - headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &deleteDataVectorIndexResponse) + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteDataVectorIndexResponse) return &deleteDataVectorIndexResponse, err } @@ -120,6 +146,7 @@ func (a *vectorSearchIndexesImpl) DeleteIndex(ctx context.Context, request Delet path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v", request.IndexName) queryParams := make(map[string]any) headers := make(map[string]string) + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &deleteIndexResponse) return err } @@ -217,6 +244,7 @@ func (a *vectorSearchIndexesImpl) SyncIndex(ctx context.Context, request SyncInd path := fmt.Sprintf("/api/2.0/vector-search/indexes/%v/sync", request.IndexName) queryParams := make(map[string]any) headers := 
make(map[string]string) + headers["Accept"] = "application/json" err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, nil, &syncIndexResponse) return err } diff --git a/service/vectorsearch/interface.go b/service/vectorsearch/interface.go index 326f552c8..e165c8042 100755 --- a/service/vectorsearch/interface.go +++ b/service/vectorsearch/interface.go @@ -15,25 +15,39 @@ type VectorSearchEndpointsService interface { CreateEndpoint(ctx context.Context, request CreateEndpoint) (*EndpointInfo, error) // Delete an endpoint. + // + // Delete a vector search endpoint. DeleteEndpoint(ctx context.Context, request DeleteEndpointRequest) error // Get an endpoint. + // + // Get details for a single vector search endpoint. GetEndpoint(ctx context.Context, request GetEndpointRequest) (*EndpointInfo, error) // List all endpoints. // + // List all vector search endpoints in the workspace. + // // Use ListEndpointsAll() to get all EndpointInfo instances, which will iterate over every result page. ListEndpoints(ctx context.Context, request ListEndpointsRequest) (*ListEndpointResponse, error) + + // Update the budget policy of an endpoint. + // + // Update the budget policy of an endpoint + UpdateEndpointBudgetPolicy(ctx context.Context, request PatchEndpointBudgetPolicyRequest) (*PatchEndpointBudgetPolicyResponse, error) + + // Update the custom tags of an endpoint. + UpdateEndpointCustomTags(ctx context.Context, request UpdateEndpointCustomTagsRequest) (*UpdateEndpointCustomTagsResponse, error) } // **Index**: An efficient representation of your embedding vectors that // supports real-time and efficient approximate nearest neighbor (ANN) search // queries. 
// -// There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index +// There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index // that automatically syncs with a source Delta Table, automatically and // incrementally updating the index as the underlying data in the Delta Table -// changes. * **Direct Vector Access Index**: An index that supports direct read +// changes. - **Direct Vector Access Index**: An index that supports direct read // and write of vectors and metadata through our REST and SDK APIs. With this // model, the user manages index updates. type VectorSearchIndexesService interface { @@ -41,7 +55,7 @@ type VectorSearchIndexesService interface { // Create an index. // // Create a new index. - CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*CreateVectorIndexResponse, error) + CreateIndex(ctx context.Context, request CreateVectorIndexRequest) (*VectorIndex, error) // Delete data from index. // diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index 4e5abe555..d9727496d 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -24,10 +24,22 @@ func (s ColumnInfo) MarshalJSON() ([]byte, error) { } type CreateEndpoint struct { - // Type of endpoint. 
+ // The budget policy id to be applied + BudgetPolicyId string `json:"budget_policy_id,omitempty"` + // Type of endpoint EndpointType EndpointType `json:"endpoint_type"` - // Name of endpoint + // Name of the vector search endpoint Name string `json:"name"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CreateEndpoint) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateEndpoint) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type CreateVectorIndexRequest struct { @@ -39,13 +51,12 @@ type CreateVectorIndexRequest struct { DirectAccessIndexSpec *DirectAccessVectorIndexSpec `json:"direct_access_index_spec,omitempty"` // Name of the endpoint to be used for serving the index EndpointName string `json:"endpoint_name"` - // There are 2 types of Vector Search indexes: - // - // - `DELTA_SYNC`: An index that automatically syncs with a source Delta - // Table, automatically and incrementally updating the index as the - // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index - // that supports direct read and write of vectors and metadata through our - // REST and SDK APIs. With this model, the user manages index updates. + // There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that + // automatically syncs with a source Delta Table, automatically and + // incrementally updating the index as the underlying data in the Delta + // Table changes. - `DIRECT_ACCESS`: An index that supports direct read and + // write of vectors and metadata through our REST and SDK APIs. With this + // model, the user manages index updates. 
IndexType VectorIndexType `json:"index_type"` // Name of the index Name string `json:"name"` @@ -53,11 +64,23 @@ type CreateVectorIndexRequest struct { PrimaryKey string `json:"primary_key"` } -type CreateVectorIndexResponse struct { - VectorIndex *VectorIndex `json:"vector_index,omitempty"` +type CustomTag struct { + // Key field for a vector search endpoint tag. + Key string `json:"key"` + // [Optional] Value field for a vector search endpoint tag. + Value string `json:"value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CustomTag) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CustomTag) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } -// Result of the upsert or delete operation. type DeleteDataResult struct { // List of primary keys for rows that failed to process. FailedPrimaryKeys []string `json:"failed_primary_keys,omitempty"` @@ -75,7 +98,6 @@ func (s DeleteDataResult) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Status of the delete operation. type DeleteDataStatus string const DeleteDataStatusFailure DeleteDataStatus = `FAILURE` @@ -105,16 +127,15 @@ func (f *DeleteDataStatus) Type() string { return "DeleteDataStatus" } -// Request payload for deleting data from a vector index. +// Delete data from index type DeleteDataVectorIndexRequest struct { // Name of the vector index where data is to be deleted. Must be a Direct // Vector Access Index. IndexName string `json:"-" url:"-"` // List of primary keys for the data to be deleted. - PrimaryKeys []string `json:"primary_keys"` + PrimaryKeys []string `json:"-" url:"primary_keys"` } -// Response to a delete data vector index request. type DeleteDataVectorIndexResponse struct { // Result of the upsert or delete operation. 
Result *DeleteDataResult `json:"result,omitempty"` @@ -124,7 +145,7 @@ type DeleteDataVectorIndexResponse struct { // Delete an endpoint type DeleteEndpointRequest struct { - // Name of the endpoint + // Name of the vector search endpoint EndpointName string `json:"-" url:"-"` } @@ -148,21 +169,18 @@ type DeltaSyncVectorIndexSpecRequest struct { ColumnsToSync []string `json:"columns_to_sync,omitempty"` // The columns that contain the embedding source. EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` - // The columns that contain the embedding vectors. The format should be - // array[double]. + // The columns that contain the embedding vectors. EmbeddingVectorColumns []EmbeddingVectorColumn `json:"embedding_vector_columns,omitempty"` - // [Optional] Automatically sync the vector index contents and computed - // embeddings to the specified Delta table. The only supported table name is - // the index name with the suffix `_writeback_table`. + // [Optional] Name of the Delta table to sync the vector index contents and + // computed embeddings to. EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` - // Pipeline execution mode. - // - // - `TRIGGERED`: If the pipeline uses the triggered execution mode, the - // system stops processing after successfully refreshing the source table in - // the pipeline once, ensuring the table is updated based on the data - // available when the update started. - `CONTINUOUS`: If the pipeline uses - // continuous execution, the pipeline processes new data as it arrives in - // the source table to keep vector index fresh. + // Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the + // triggered execution mode, the system stops processing after successfully + // refreshing the source table in the pipeline once, ensuring the table is + // updated based on the data available when the update started. 
- + // `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline + // processes new data as it arrives in the source table to keep vector index + // fresh. PipelineType PipelineType `json:"pipeline_type,omitempty"` // The name of the source table. SourceTable string `json:"source_table,omitempty"` @@ -188,14 +206,13 @@ type DeltaSyncVectorIndexSpecResponse struct { EmbeddingWritebackTable string `json:"embedding_writeback_table,omitempty"` // The ID of the pipeline that is used to sync the index. PipelineId string `json:"pipeline_id,omitempty"` - // Pipeline execution mode. - // - // - `TRIGGERED`: If the pipeline uses the triggered execution mode, the - // system stops processing after successfully refreshing the source table in - // the pipeline once, ensuring the table is updated based on the data - // available when the update started. - `CONTINUOUS`: If the pipeline uses - // continuous execution, the pipeline processes new data as it arrives in - // the source table to keep vector index fresh. + // Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the + // triggered execution mode, the system stops processing after successfully + // refreshing the source table in the pipeline once, ensuring the table is + // updated based on the data available when the update started. - + // `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline + // processes new data as it arrives in the source table to keep vector index + // fresh. PipelineType PipelineType `json:"pipeline_type,omitempty"` // The name of the source table. SourceTable string `json:"source_table,omitempty"` @@ -212,15 +229,14 @@ func (s DeltaSyncVectorIndexSpecResponse) MarshalJSON() ([]byte, error) { } type DirectAccessVectorIndexSpec struct { - // Contains the optional model endpoint to use during query time. + // The columns that contain the embedding source. The format should be + // array[double]. 
EmbeddingSourceColumns []EmbeddingSourceColumn `json:"embedding_source_columns,omitempty"` - + // The columns that contain the embedding vectors. The format should be + // array[double]. EmbeddingVectorColumns []EmbeddingVectorColumn `json:"embedding_vector_columns,omitempty"` - // The schema of the index in JSON format. - // - // Supported types are `integer`, `long`, `float`, `double`, `boolean`, - // `string`, `date`, `timestamp`. - // + // The schema of the index in JSON format. Supported types are `integer`, + // `long`, `float`, `double`, `boolean`, `string`, `date`, `timestamp`. // Supported types for vector column: `array`, `array`,`. SchemaJson string `json:"schema_json,omitempty"` @@ -274,9 +290,13 @@ type EndpointInfo struct { CreationTimestamp int64 `json:"creation_timestamp,omitempty"` // Creator of the endpoint Creator string `json:"creator,omitempty"` + // The custom tags assigned to the endpoint + CustomTags []CustomTag `json:"custom_tags,omitempty"` + // The budget policy id applied to the endpoint + EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` // Current status of the endpoint EndpointStatus *EndpointStatus `json:"endpoint_status,omitempty"` - // Type of endpoint. 
+ // Type of endpoint EndpointType EndpointType `json:"endpoint_type,omitempty"` // Unique identifier of the endpoint Id string `json:"id,omitempty"` @@ -284,7 +304,7 @@ type EndpointInfo struct { LastUpdatedTimestamp int64 `json:"last_updated_timestamp,omitempty"` // User who last updated the endpoint LastUpdatedUser string `json:"last_updated_user,omitempty"` - // Name of endpoint + // Name of the vector search endpoint Name string `json:"name,omitempty"` // Number of indexes on the endpoint NumIndexes int `json:"num_indexes,omitempty"` @@ -438,7 +458,13 @@ func (s ListIndexesRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// copied from proto3 / Google Well Known Types, source: +// https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. type ListValue struct { + // Repeated field of dynamically typed values. Values []Value `json:"values,omitempty"` } @@ -483,13 +509,12 @@ type MiniVectorIndex struct { Creator string `json:"creator,omitempty"` // Name of the endpoint associated with the index EndpointName string `json:"endpoint_name,omitempty"` - // There are 2 types of Vector Search indexes: - // - // - `DELTA_SYNC`: An index that automatically syncs with a source Delta - // Table, automatically and incrementally updating the index as the - // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index - // that supports direct read and write of vectors and metadata through our - // REST and SDK APIs. With this model, the user manages index updates. + // There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that + // automatically syncs with a source Delta Table, automatically and + // incrementally updating the index as the underlying data in the Delta + // Table changes. 
- `DIRECT_ACCESS`: An index that supports direct read and + // write of vectors and metadata through our REST and SDK APIs. With this + // model, the user manages index updates. IndexType VectorIndexType `json:"index_type,omitempty"` // Name of the index Name string `json:"name,omitempty"` @@ -507,14 +532,34 @@ func (s MiniVectorIndex) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Pipeline execution mode. -// -// - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system -// stops processing after successfully refreshing the source table in the -// pipeline once, ensuring the table is updated based on the data available when -// the update started. - `CONTINUOUS`: If the pipeline uses continuous -// execution, the pipeline processes new data as it arrives in the source table -// to keep vector index fresh. +type PatchEndpointBudgetPolicyRequest struct { + // The budget policy id to be applied + BudgetPolicyId string `json:"budget_policy_id"` + // Name of the vector search endpoint + EndpointName string `json:"-" url:"-"` +} + +type PatchEndpointBudgetPolicyResponse struct { + // The budget policy applied to the vector search endpoint. + EffectiveBudgetPolicyId string `json:"effective_budget_policy_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *PatchEndpointBudgetPolicyResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s PatchEndpointBudgetPolicyResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +// Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered +// execution mode, the system stops processing after successfully refreshing the +// source table in the pipeline once, ensuring the table is updated based on the +// data available when the update started. - `CONTINUOUS`: If the pipeline uses +// continuous execution, the pipeline processes new data as it arrives in the +// source table to keep vector index fresh. 
type PipelineType string // If the pipeline uses continuous execution, the pipeline processes new data as @@ -576,10 +621,12 @@ type QueryVectorIndexRequest struct { ColumnsToRerank []string `json:"columns_to_rerank,omitempty"` // JSON string representing query filters. // - // Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": - // 5}`: Filter for id greater than 5. - `{"id <=": 5}`: Filter for id less - // than equal to 5. - `{"id >=": 5}`: Filter for id greater than equal to 5. - // - `{"id": 5}`: Filter for id equal to 5. + // Example filters: + // + // - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for + // id greater than 5. - `{"id <=": 5}`: Filter for id less than equal to 5. + // - `{"id >=": 5}`: Filter for id greater than equal to 5. - `{"id": 5}`: + // Filter for id equal to 5. FiltersJson string `json:"filters_json,omitempty"` // Name of the vector index to query. IndexName string `json:"-" url:"-"` @@ -631,7 +678,7 @@ func (s QueryVectorIndexResponse) MarshalJSON() ([]byte, error) { // Data returned in the query result. type ResultData struct { // Data rows returned in the query. - DataArray [][]string `json:"data_array,omitempty"` + DataArray []ListValue `json:"data_array,omitempty"` // Number of rows in the result set. RowCount int `json:"row_count,omitempty"` @@ -664,7 +711,6 @@ func (s ResultManifest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Request payload for scanning data from a vector index. type ScanVectorIndexRequest struct { // Name of the vector index to scan. 
IndexName string `json:"-" url:"-"` @@ -702,6 +748,15 @@ func (s ScanVectorIndexResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// copied from proto3 / Google Well Known Types, source: +// https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto +// `Struct` represents a structured data value, consisting of fields which map +// to dynamically typed values. In some languages, `Struct` might be supported +// by a native representation. For example, in scripting languages like JS a +// struct is represented as an object. The details of that representation are +// described together with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. type Struct struct { // Data entry, corresponding to a row in a vector index. Fields []MapStringValueEntry `json:"fields,omitempty"` @@ -716,7 +771,30 @@ type SyncIndexRequest struct { type SyncIndexResponse struct { } -// Result of the upsert or delete operation. +type UpdateEndpointCustomTagsRequest struct { + // The new custom tags for the vector search endpoint + CustomTags []CustomTag `json:"custom_tags"` + // Name of the vector search endpoint + EndpointName string `json:"-" url:"-"` +} + +type UpdateEndpointCustomTagsResponse struct { + // All the custom tags that are applied to the vector search endpoint. + CustomTags []CustomTag `json:"custom_tags,omitempty"` + // The name of the vector search endpoint whose custom tags were updated. + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *UpdateEndpointCustomTagsResponse) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s UpdateEndpointCustomTagsResponse) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + type UpsertDataResult struct { // List of primary keys for rows that failed to process. 
FailedPrimaryKeys []string `json:"failed_primary_keys,omitempty"` @@ -734,7 +812,6 @@ func (s UpsertDataResult) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Status of the upsert operation. type UpsertDataStatus string const UpsertDataStatusFailure UpsertDataStatus = `FAILURE` @@ -764,7 +841,6 @@ func (f *UpsertDataStatus) Type() string { return "UpsertDataStatus" } -// Request payload for upserting data into a vector index. type UpsertDataVectorIndexRequest struct { // Name of the vector index where data is to be upserted. Must be a Direct // Vector Access Index. @@ -773,7 +849,6 @@ type UpsertDataVectorIndexRequest struct { InputsJson string `json:"inputs_json"` } -// Response to an upsert data vector index request. type UpsertDataVectorIndexResponse struct { // Result of the upsert or delete operation. Result *UpsertDataResult `json:"result,omitempty"` @@ -783,15 +858,26 @@ type UpsertDataVectorIndexResponse struct { type Value struct { BoolValue bool `json:"bool_value,omitempty"` - + // copied from proto3 / Google Well Known Types, source: + // https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + // `ListValue` is a wrapper around a repeated field of values. + // + // The JSON representation for `ListValue` is JSON array. ListValue *ListValue `json:"list_value,omitempty"` - NullValue string `json:"null_value,omitempty"` - NumberValue float64 `json:"number_value,omitempty"` StringValue string `json:"string_value,omitempty"` - + // copied from proto3 / Google Well Known Types, source: + // https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto + // `Struct` represents a structured data value, consisting of fields which + // map to dynamically typed values. In some languages, `Struct` might be + // supported by a native representation. 
For example, in scripting languages + // like JS a struct is represented as an object. The details of that + // representation are described together with the proto support for the + // language. + // + // The JSON representation for `Struct` is JSON object. StructValue *Struct `json:"struct_value,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -814,13 +900,12 @@ type VectorIndex struct { DirectAccessIndexSpec *DirectAccessVectorIndexSpec `json:"direct_access_index_spec,omitempty"` // Name of the endpoint associated with the index EndpointName string `json:"endpoint_name,omitempty"` - // There are 2 types of Vector Search indexes: - // - // - `DELTA_SYNC`: An index that automatically syncs with a source Delta - // Table, automatically and incrementally updating the index as the - // underlying data in the Delta Table changes. - `DIRECT_ACCESS`: An index - // that supports direct read and write of vectors and metadata through our - // REST and SDK APIs. With this model, the user manages index updates. + // There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that + // automatically syncs with a source Delta Table, automatically and + // incrementally updating the index as the underlying data in the Delta + // Table changes. - `DIRECT_ACCESS`: An index that supports direct read and + // write of vectors and metadata through our REST and SDK APIs. With this + // model, the user manages index updates. IndexType VectorIndexType `json:"index_type,omitempty"` // Name of the index Name string `json:"name,omitempty"` @@ -861,13 +946,12 @@ func (s VectorIndexStatus) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// There are 2 types of Vector Search indexes: -// -// - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, -// automatically and incrementally updating the index as the underlying data in -// the Delta Table changes. 
- `DIRECT_ACCESS`: An index that supports direct -// read and write of vectors and metadata through our REST and SDK APIs. With -// this model, the user manages index updates. +// There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that +// automatically syncs with a source Delta Table, automatically and +// incrementally updating the index as the underlying data in the Delta Table +// changes. - `DIRECT_ACCESS`: An index that supports direct read and write of +// vectors and metadata through our REST and SDK APIs. With this model, the user +// manages index updates. type VectorIndexType string // An index that automatically syncs with a source Delta Table, automatically diff --git a/workspace_client.go b/workspace_client.go index 736262051..13cd2134f 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -1075,10 +1075,10 @@ type WorkspaceClient struct { // supports real-time and efficient approximate nearest neighbor (ANN) // search queries. // - // There are 2 types of Vector Search indexes: * **Delta Sync Index**: An + // There are 2 types of Vector Search indexes: - **Delta Sync Index**: An // index that automatically syncs with a source Delta Table, automatically // and incrementally updating the index as the underlying data in the Delta - // Table changes. * **Direct Vector Access Index**: An index that supports + // Table changes. - **Direct Vector Access Index**: An index that supports // direct read and write of vectors and metadata through our REST and SDK // APIs. With this model, the user manages index updates. 
VectorSearchIndexes vectorsearch.VectorSearchIndexesInterface From 8027ec932413c18e7d2e586f7165e551194438e7 Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Thu, 24 Apr 2025 18:26:00 +0000 Subject: [PATCH 43/54] [Release] Release v0.64.0 ## Release v0.64.0 ### New Features and Improvements * Enabled asynchronous token refreshes by default ([#1208](https://github.com/databricks/databricks-sdk-go/pull/1208)). ### API Changes * Added `UpdateEndpointBudgetPolicy` and `UpdateEndpointCustomTags` methods for [w.VectorSearchEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI) workspace-level service. * Added `NodeTypeFlexibility` field for [compute.EditInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EditInstancePool). * Added `PageSize` and `PageToken` fields for [compute.GetEvents](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEvents). * Added `NextPageToken` and `PrevPageToken` fields for [compute.GetEventsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEventsResponse). * Added `NodeTypeFlexibility` field for [compute.GetInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetInstancePool). * Added `NodeTypeFlexibility` field for [compute.InstancePoolAndStats](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InstancePoolAndStats). * Added `EffectivePerformanceTarget` field for [jobs.RepairHistoryItem](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairHistoryItem). * Added `PerformanceTarget` field for [jobs.RepairRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairRun). 
* Added `BudgetPolicyId` field for [vectorsearch.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#CreateEndpoint). * Added `CustomTags` and `EffectiveBudgetPolicyId` fields for [vectorsearch.EndpointInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#EndpointInfo). * Added `Disabled` enum value for [jobs.TerminationCodeCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationCodeCode). * [Breaking] Changed `CreateIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service to return [vectorsearch.VectorIndex](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorIndex). * [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service . HTTP method/verb has changed. * [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service with new required argument order. * [Breaking] Changed `DataArray` field for [vectorsearch.ResultData](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ResultData) to type [vectorsearch.ListValueList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValueList). * [Breaking] Changed waiter for [VectorSearchEndpointsAPI.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI.CreateEndpoint). * [Breaking] Removed `NullValue` field for [vectorsearch.Value](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#Value). 
--- .release_metadata.json | 2 +- CHANGELOG.md | 25 +++++++++++++++++++++++++ NEXT_CHANGELOG.md | 20 +------------------- version/version.go | 2 +- 4 files changed, 28 insertions(+), 21 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index a7b6137c2..008a2f2a7 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-04-14 14:27:08+0000" + "timestamp": "2025-04-24 18:25:56+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 946ad577e..36644701b 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Version changelog +## Release v0.64.0 + +### New Features and Improvements +* Enabled asynchronous token refreshes by default ([#1208](https://github.com/databricks/databricks-sdk-go/pull/1208)). + +### API Changes +* Added `UpdateEndpointBudgetPolicy` and `UpdateEndpointCustomTags` methods for [w.VectorSearchEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI) workspace-level service. +* Added `NodeTypeFlexibility` field for [compute.EditInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EditInstancePool). +* Added `PageSize` and `PageToken` fields for [compute.GetEvents](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEvents). +* Added `NextPageToken` and `PrevPageToken` fields for [compute.GetEventsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEventsResponse). +* Added `NodeTypeFlexibility` field for [compute.GetInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetInstancePool). +* Added `NodeTypeFlexibility` field for [compute.InstancePoolAndStats](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InstancePoolAndStats). 
+* Added `EffectivePerformanceTarget` field for [jobs.RepairHistoryItem](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairHistoryItem). +* Added `PerformanceTarget` field for [jobs.RepairRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairRun). +* Added `BudgetPolicyId` field for [vectorsearch.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#CreateEndpoint). +* Added `CustomTags` and `EffectiveBudgetPolicyId` fields for [vectorsearch.EndpointInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#EndpointInfo). +* Added `Disabled` enum value for [jobs.TerminationCodeCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationCodeCode). +* [Breaking] Changed `CreateIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service to return [vectorsearch.VectorIndex](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorIndex). +* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service . HTTP method/verb has changed. +* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service with new required argument order. +* [Breaking] Changed `DataArray` field for [vectorsearch.ResultData](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ResultData) to type [vectorsearch.ListValueList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValueList). 
+* [Breaking] Changed waiter for [VectorSearchEndpointsAPI.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI.CreateEndpoint). +* [Breaking] Removed `NullValue` field for [vectorsearch.Value](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#Value). + + ## Release v0.63.0 ### API Changes diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index d7c78f201..a80f60d2b 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,9 +1,8 @@ # NEXT CHANGELOG -## Release v0.64.0 +## Release v0.65.0 ### New Features and Improvements -* Enabled asynchronous token refreshes by default ([#1208](https://github.com/databricks/databricks-sdk-go/pull/1208)). ### Bug Fixes @@ -12,20 +11,3 @@ ### Internal Changes ### API Changes -* Added `UpdateEndpointBudgetPolicy` and `UpdateEndpointCustomTags` methods for [w.VectorSearchEndpoints](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI) workspace-level service. -* Added `NodeTypeFlexibility` field for [compute.EditInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#EditInstancePool). -* Added `PageSize` and `PageToken` fields for [compute.GetEvents](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEvents). -* Added `NextPageToken` and `PrevPageToken` fields for [compute.GetEventsResponse](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetEventsResponse). -* Added `NodeTypeFlexibility` field for [compute.GetInstancePool](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#GetInstancePool). -* Added `NodeTypeFlexibility` field for [compute.InstancePoolAndStats](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#InstancePoolAndStats). 
-* Added `EffectivePerformanceTarget` field for [jobs.RepairHistoryItem](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairHistoryItem). -* Added `PerformanceTarget` field for [jobs.RepairRun](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#RepairRun). -* Added `BudgetPolicyId` field for [vectorsearch.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#CreateEndpoint). -* Added `CustomTags` and `EffectiveBudgetPolicyId` fields for [vectorsearch.EndpointInfo](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#EndpointInfo). -* Added `Disabled` enum value for [jobs.TerminationCodeCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/jobs#TerminationCodeCode). -* [Breaking] Changed `CreateIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service to return [vectorsearch.VectorIndex](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorIndex). -* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service . HTTP method/verb has changed. -* [Breaking] Changed `DeleteDataVectorIndex` method for [w.VectorSearchIndexes](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchIndexesAPI) workspace-level service with new required argument order. -* [Breaking] Changed `DataArray` field for [vectorsearch.ResultData](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ResultData) to type [vectorsearch.ListValueList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValueList). 
-* [Breaking] Changed waiter for [VectorSearchEndpointsAPI.CreateEndpoint](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#VectorSearchEndpointsAPI.CreateEndpoint). -* [Breaking] Removed `NullValue` field for [vectorsearch.Value](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#Value). diff --git a/version/version.go b/version/version.go index 0419e2713..ad717b25d 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.63.0" +const Version = "0.64.0" From be2e0dbf085525b21891431e4a7c22d1bd39a703 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Tue, 29 Apr 2025 13:14:21 +0200 Subject: [PATCH 44/54] Implement Databricks OIDC as Token Source (#1204) ## What changes are proposed in this pull request? Implement Databricks OIDC as Token Source ## How is this tested? Added unit and integration tests --- NEXT_CHANGELOG.md | 5 + README.md | 47 ++-- config/auth_azure_github_oidc.go | 38 +-- config/auth_databricks_oidc.go | 91 +++++++ config/auth_databricks_oidc_test.go | 298 +++++++++++++++++++++ config/auth_default.go | 74 +++-- config/config.go | 3 + config/id_token_source_github_oidc.go | 45 ++++ config/id_token_source_github_oidc_test.go | 91 +++++++ config/oauth_visitors.go | 7 +- config/token_source_strategy.go | 62 +++++ config/token_source_strategy_test.go | 69 +++++ internal/auth_test.go | 153 +++++++++++ 13 files changed, 920 insertions(+), 63 deletions(-) create mode 100644 config/auth_databricks_oidc.go create mode 100644 config/auth_databricks_oidc_test.go create mode 100644 config/id_token_source_github_oidc.go create mode 100644 config/id_token_source_github_oidc_test.go create mode 100644 config/token_source_strategy.go create mode 100644 config/token_source_strategy_test.go create mode 100644 internal/auth_test.go diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 
a80f60d2b..7e31bcafc 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -3,6 +3,11 @@ ## Release v0.65.0 ### New Features and Improvements +* Introduce support for Databricks Workload Identity Federation in GitHub workflows ([1177](https://github.com/databricks/databricks-sdk-go/pull/1177)). + See README.md for instructions. +* [Breaking] Users running their workflows in GitHub Actions, which use Cloud native authentication and also have the `DATABRICKS_CLIENT_ID` and `DATABRICKS_HOST` + environment variables set may see their authentication start failing due to the order in which the SDK tries different authentication methods. + In such a case, the `DATABRICKS_AUTH_TYPE` environment variable must be set to match the previously used authentication method. ### Bug Fixes diff --git a/README.md b/README.md index a299507ee..ae8413273 100644 --- a/README.md +++ b/README.md @@ -14,19 +14,35 @@ The Databricks SDK for Go includes functionality to accelerate development with ## Contents -- [Getting started](#getting-started) -- [Authentication](#authentication) -- [Code examples](#code-examples) -- [Long running operations](#long-running-operations) -- [Paginated responses](#paginated-responses) -- [GetByName utility methods](#getbyname-utility-methods) -- [Node type and Databricks Runtime selectors](#node-type-and-databricks-runtime-selectors) -- [Integration with `io` interfaces for DBFS](#integration-with-io-interfaces-for-dbfs) -- [User Agent Request Attribution](#user-agent-request-attribution) -- [Error Handling](#error-handling) -- [Logging](#logging) +- [Databricks SDK for Go](#databricks-sdk-for-go) + - [Contents](#contents) + - [Getting started](#getting-started) + - [Authentication](#authentication) + - [In this section](#in-this-section) + - [Default authentication flow](#default-authentication-flow) + - [Databricks native authentication](#databricks-native-authentication) + - [Azure native authentication](#azure-native-authentication) + - [Google Cloud 
Platform native authentication](#google-cloud-platform-native-authentication) + - [Overriding `.databrickscfg`](#overriding-databrickscfg) + - [Additional authentication configuration options](#additional-authentication-configuration-options) + - [Custom credentials provider](#custom-credentials-provider) + - [Code examples](#code-examples) + - [Long-running operations](#long-running-operations) + - [In this section](#in-this-section-1) + - [Command execution on clusters](#command-execution-on-clusters) + - [Cluster library management](#cluster-library-management) + - [Advanced usage](#advanced-usage) + - [Paginated responses](#paginated-responses) + - [`GetByName` utility methods](#getbyname-utility-methods) + - [Node type and Databricks Runtime selectors](#node-type-and-databricks-runtime-selectors) + - [Integration with `io` interfaces for DBFS](#integration-with-io-interfaces-for-dbfs) + - [Reading into and writing from buffers](#reading-into-and-writing-from-buffers) + - [`pflag.Value` for enums](#pflagvalue-for-enums) + - [User Agent Request Attribution](#user-agent-request-attribution) + - [Error handling](#error-handling) + - [Logging](#logging) - [Testing](#testing) -- [Interface stability](#interface-stability) + - [Interface stability](#interface-stability) ## Getting started @@ -158,18 +174,17 @@ Depending on the Databricks authentication method, the SDK uses the following in ### Databricks native authentication -By default, the Databricks SDK for Go initially tries Databricks token authentication (`AuthType: "pat"` in `*databricks.Config`). If the SDK is unsuccessful, it then tries Databricks basic (username/password) authentication (`AuthType: "basic"` in `*databricks.Config`). +By default, the Databricks SDK for Go initially tries Databricks token authentication (`AuthType: "pat"` in `*databricks.Config`). 
If the SDK is unsuccessful, it then tries Workload Identity Federation (WIF) based authentication(`AuthType: "github-oidc"` in `*databricks.Config`). Currently, only GitHub provided JWT Tokens is supported. - For Databricks token authentication, you must provide `Host` and `Token`; or their environment variable or `.databrickscfg` file field equivalents. -- For Databricks basic authentication, you must provide `Host`, `Username`, and `Password` _(for AWS workspace-level operations)_; or `Host`, `AccountID`, `Username`, and `Password` _(for AWS, Azure, or GCP account-level operations)_; or their environment variable or `.databrickscfg` file field equivalents. +- For Databricks OIDC authentication, you must provide the `Host`, `ClientId` and `TokenAudience` _(optional)_ either directly, through the corresponding environment variables, or in your `.databrickscfg` configuration file. More information can be found in [Databricks Documentation](https://docs.databricks.com/aws/en/dev-tools/auth/oauth-federation#workload-identity-federation) | `*databricks.Config` argument | Description | Environment variable / `.databrickscfg` file field | | ----------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------------------------------- | | `Host` | _(String)_ The Databricks host URL for either the Databricks workspace endpoint or the Databricks accounts endpoint. | `DATABRICKS_HOST` / `host` | | `AccountID` | _(String)_ The Databricks account ID for the Databricks accounts endpoint. Only has effect when `Host` is either `https://accounts.cloud.databricks.com/` _(AWS)_, `https://accounts.azuredatabricks.net/` _(Azure)_, or `https://accounts.gcp.databricks.com/` _(GCP)_. 
| `DATABRICKS_ACCOUNT_ID` / `account_id` | | `Token` | _(String)_ The Databricks personal access token (PAT) _(AWS, Azure, and GCP)_ or Azure Active Directory (Azure AD) token _(Azure)_. | `DATABRICKS_TOKEN` / `token` | -| `Username` | _(String)_ The Databricks username part of basic authentication. Only possible when `Host` is `*.cloud.databricks.com` _(AWS)_. | `DATABRICKS_USERNAME` / `username` | -| `Password` | _(String)_ The Databricks password part of basic authentication. Only possible when `Host` is `*.cloud.databricks.com` _(AWS)_. | `DATABRICKS_PASSWORD` / `password` | +| `TokenAudience` | _(String)_ When using Workload Identity Federation, the audience to specify when fetching an ID token from the ID token supplier. | `DATABRICKS_TOKEN_AUDIENCE` / `token_audience` | For example, to use Databricks token authentication: diff --git a/config/auth_azure_github_oidc.go b/config/auth_azure_github_oidc.go index 2f82214f2..7be69563f 100644 --- a/config/auth_azure_github_oidc.go +++ b/config/auth_azure_github_oidc.go @@ -8,7 +8,6 @@ import ( "github.com/databricks/databricks-sdk-go/config/credentials" "github.com/databricks/databricks-sdk-go/httpclient" - "github.com/databricks/databricks-sdk-go/logger" "golang.org/x/oauth2" ) @@ -24,15 +23,19 @@ func (c AzureGithubOIDCCredentials) Name() string { // Configure implements [CredentialsStrategy.Configure]. func (c AzureGithubOIDCCredentials) Configure(ctx context.Context, cfg *Config) (credentials.CredentialsProvider, error) { // Sanity check that the config is configured for Azure Databricks. 
- if !cfg.IsAzure() || cfg.AzureClientID == "" || cfg.Host == "" || cfg.AzureTenantID == "" { + if !cfg.IsAzure() || cfg.AzureClientID == "" || cfg.Host == "" || cfg.AzureTenantID == "" || cfg.ActionsIDTokenRequestURL == "" || cfg.ActionsIDTokenRequestToken == "" { return nil, nil } + supplier := githubIDTokenSource{actionsIDTokenRequestURL: cfg.ActionsIDTokenRequestURL, + actionsIDTokenRequestToken: cfg.ActionsIDTokenRequestToken, + refreshClient: cfg.refreshClient, + } - idToken, err := requestIDToken(ctx, cfg) + idToken, err := supplier.IDToken(ctx, "api://AzureADTokenExchange") if err != nil { return nil, err } - if idToken == "" { + if idToken.Value == "" { return nil, nil } @@ -40,38 +43,13 @@ func (c AzureGithubOIDCCredentials) Configure(ctx context.Context, cfg *Config) aadEndpoint: fmt.Sprintf("%s%s/oauth2/token", cfg.Environment().AzureActiveDirectoryEndpoint(), cfg.AzureTenantID), clientID: cfg.AzureClientID, applicationID: cfg.Environment().AzureApplicationID, - idToken: idToken, + idToken: idToken.Value, httpClient: cfg.refreshClient, } return credentials.NewOAuthCredentialsProvider(refreshableVisitor(ts), ts.Token), nil } -// requestIDToken requests an ID token from the Github Action. 
-func requestIDToken(ctx context.Context, cfg *Config) (string, error) { - if cfg.ActionsIDTokenRequestURL == "" { - logger.Debugf(ctx, "Missing cfg.ActionsIDTokenRequestURL, likely not calling from a Github action") - return "", nil - } - if cfg.ActionsIDTokenRequestToken == "" { - logger.Debugf(ctx, "Missing cfg.ActionsIDTokenRequestToken, likely not calling from a Github action") - return "", nil - } - - resp := struct { // anonymous struct to parse the response - Value string `json:"value"` - }{} - err := cfg.refreshClient.Do(ctx, "GET", fmt.Sprintf("%s&audience=api://AzureADTokenExchange", cfg.ActionsIDTokenRequestURL), - httpclient.WithRequestHeader("Authorization", fmt.Sprintf("Bearer %s", cfg.ActionsIDTokenRequestToken)), - httpclient.WithResponseUnmarshal(&resp), - ) - if err != nil { - return "", fmt.Errorf("failed to request ID token from %s: %w", cfg.ActionsIDTokenRequestURL, err) - } - - return resp.Value, nil -} - // azureOIDCTokenSource implements [oauth2.TokenSource] to obtain Azure auth // tokens from an ID token. type azureOIDCTokenSource struct { diff --git a/config/auth_databricks_oidc.go b/config/auth_databricks_oidc.go new file mode 100644 index 000000000..434eb49b1 --- /dev/null +++ b/config/auth_databricks_oidc.go @@ -0,0 +1,91 @@ +package config + +import ( + "context" + "errors" + "net/url" + + "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "github.com/databricks/databricks-sdk-go/credentials/u2m" + "github.com/databricks/databricks-sdk-go/logger" + "golang.org/x/oauth2" + "golang.org/x/oauth2/clientcredentials" +) + +// Creates a new Databricks OIDC TokenSource. +func NewDatabricksOIDCTokenSource(cfg DatabricksOIDCTokenSourceConfig) auth.TokenSource { + return &databricksOIDCTokenSource{ + cfg: cfg, + } +} + +// Config for Databricks OIDC TokenSource. +type DatabricksOIDCTokenSourceConfig struct { + // ClientID is the client ID of the Databricks OIDC application. 
For + // Databricks Service Principal, this is the Application ID of the Service Principal. + ClientID string + // [Optional] AccountID is the account ID of the Databricks Account. + // This is only used for Account level tokens. + AccountID string + // Host is the host of the Databricks account or workspace. + Host string + // TokenEndpointProvider returns the token endpoint for the Databricks OIDC application. + TokenEndpointProvider func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) + // Audience is the audience of the Databricks OIDC application. + // This is only used for Workspace level tokens. + Audience string + // IdTokenSource returns the IDToken to be used for the token exchange. + IdTokenSource IDTokenSource +} + +// databricksOIDCTokenSource is a auth.TokenSource which exchanges a token using +// Workload Identity Federation. +type databricksOIDCTokenSource struct { + cfg DatabricksOIDCTokenSourceConfig +} + +// Token implements [TokenSource.Token] +func (w *databricksOIDCTokenSource) Token(ctx context.Context) (*oauth2.Token, error) { + if w.cfg.ClientID == "" { + logger.Debugf(ctx, "Missing ClientID") + return nil, errors.New("missing ClientID") + } + if w.cfg.Host == "" { + logger.Debugf(ctx, "Missing Host") + return nil, errors.New("missing Host") + } + endpoints, err := w.cfg.TokenEndpointProvider(ctx) + if err != nil { + return nil, err + } + audience := w.determineAudience(endpoints) + idToken, err := w.cfg.IdTokenSource.IDToken(ctx, audience) + if err != nil { + return nil, err + } + + c := &clientcredentials.Config{ + ClientID: w.cfg.ClientID, + AuthStyle: oauth2.AuthStyleInParams, + TokenURL: endpoints.TokenEndpoint, + Scopes: []string{"all-apis"}, + EndpointParams: url.Values{ + "subject_token_type": {"urn:ietf:params:oauth:token-type:jwt"}, + "subject_token": {idToken.Value}, + "grant_type": {"urn:ietf:params:oauth:grant-type:token-exchange"}, + }, + } + return c.Token(ctx) +} + +func (w *databricksOIDCTokenSource) 
determineAudience(endpoints *u2m.OAuthAuthorizationServer) string { + if w.cfg.Audience != "" { + return w.cfg.Audience + } + // For Databricks Accounts, the account id is the default audience. + if w.cfg.AccountID != "" { + return w.cfg.AccountID + } + // For Databricks Workspaces, the auth endpoint is the default audience. + return endpoints.TokenEndpoint +} diff --git a/config/auth_databricks_oidc_test.go b/config/auth_databricks_oidc_test.go new file mode 100644 index 000000000..388766e14 --- /dev/null +++ b/config/auth_databricks_oidc_test.go @@ -0,0 +1,298 @@ +package config + +import ( + "context" + "errors" + "net/http" + "net/url" + "testing" + + "github.com/databricks/databricks-sdk-go/credentials/u2m" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" + "github.com/google/go-cmp/cmp" + "golang.org/x/oauth2" +) + +type mockIdTokenProvider struct { + // input + audience string + // output + idToken string + err error +} + +func (m *mockIdTokenProvider) IDToken(ctx context.Context, audience string) (*IDToken, error) { + m.audience = audience + return &IDToken{Value: m.idToken}, m.err +} + +func TestDatabricksOidcTokenSource(t *testing.T) { + testCases := []struct { + desc string + clientID string + accountID string + host string + tokenAudience string + httpTransport http.RoundTripper + oidcEndpointProvider func(context.Context) (*u2m.OAuthAuthorizationServer, error) + idToken string + expectedAudience string + tokenProviderError error + wantToken string + wantErrPrefix *string + }{ + { + desc: "missing host", + clientID: "client-id", + tokenAudience: "token-audience", + wantErrPrefix: errPrefix("missing Host"), + }, + { + desc: "missing client ID", + host: "http://host.com", + tokenAudience: "token-audience", + wantErrPrefix: errPrefix("missing ClientID"), + }, + { + desc: "token provider error", + + clientID: "client-id", + host: "http://host.com", + tokenAudience: "token-audience", + oidcEndpointProvider: func(ctx context.Context) 
(*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + }, + expectedAudience: "token-audience", + tokenProviderError: errors.New("error getting id token"), + wantErrPrefix: errPrefix("error getting id token"), + }, + { + desc: "databricks workspace server error", + clientID: "client-id", + host: "http://host.com", + tokenAudience: "token-audience", + oidcEndpointProvider: func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + }, + httpTransport: fixtures.MappingTransport{ + "POST /oidc/v1/token": { + Status: http.StatusInternalServerError, + ExpectedHeaders: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + }, + }, + expectedAudience: "token-audience", + idToken: "id-token-42", + wantErrPrefix: errPrefix("oauth2: cannot fetch token: Internal Server Error"), + }, + { + desc: "invalid auth token", + clientID: "client-id", + host: "http://host.com", + tokenAudience: "token-audience", + oidcEndpointProvider: func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + }, + httpTransport: fixtures.MappingTransport{ + "POST /oidc/v1/token": { + Status: http.StatusOK, + ExpectedHeaders: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + Response: map[string]string{ + "foo": "bar", + }, + }, + }, + expectedAudience: "token-audience", + idToken: "id-token-42", + wantErrPrefix: errPrefix("oauth2: server response missing access_token"), + }, + { + desc: "success workspace", + clientID: "client-id", + host: "http://host.com", + tokenAudience: "token-audience", + oidcEndpointProvider: func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: 
"https://host.com/oidc/v1/token", + }, nil + }, + httpTransport: fixtures.MappingTransport{ + "POST /oidc/v1/token": { + + Status: http.StatusOK, + ExpectedHeaders: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + ExpectedRequest: url.Values{ + "client_id": {"client-id"}, + "scope": {"all-apis"}, + "subject_token_type": {"urn:ietf:params:oauth:token-type:jwt"}, + "subject_token": {"id-token-42"}, + "grant_type": {"urn:ietf:params:oauth:grant-type:token-exchange"}, + }, + Response: map[string]string{ + "token_type": "access-token", + "access_token": "test-auth-token", + "refresh_token": "refresh", + "expires_on": "0", + }, + }, + }, + expectedAudience: "token-audience", + idToken: "id-token-42", + wantToken: "test-auth-token", + }, + { + desc: "success account", + clientID: "client-id", + accountID: "ac123", + host: "https://accounts.databricks.com", + tokenAudience: "token-audience", + oidcEndpointProvider: func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + }, + httpTransport: fixtures.MappingTransport{ + "POST /oidc/v1/token": { + Status: http.StatusOK, + ExpectedHeaders: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + ExpectedRequest: url.Values{ + "client_id": {"client-id"}, + "scope": {"all-apis"}, + "subject_token_type": {"urn:ietf:params:oauth:token-type:jwt"}, + "subject_token": {"id-token-42"}, + "grant_type": {"urn:ietf:params:oauth:grant-type:token-exchange"}, + }, + Response: map[string]string{ + "token_type": "access-token", + "access_token": "test-auth-token", + "refresh_token": "refresh", + "expires_on": "0", + }, + }, + }, + expectedAudience: "token-audience", + idToken: "id-token-42", + wantToken: "test-auth-token", + }, + { + desc: "default token audience account", + clientID: "client-id", + accountID: "ac123", + host: "https://accounts.databricks.com", + 
oidcEndpointProvider: func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + }, + httpTransport: fixtures.MappingTransport{ + "POST /oidc/v1/token": { + Status: http.StatusOK, + ExpectedHeaders: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + Response: map[string]string{ + "token_type": "access-token", + "access_token": "test-auth-token", + "refresh_token": "refresh", + "expires_on": "0", + }, + }, + }, + expectedAudience: "ac123", + idToken: "id-token-42", + wantToken: "test-auth-token", + }, + { + desc: "default token audience workspace", + clientID: "client-id", + host: "https://host.com", + oidcEndpointProvider: func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + }, + httpTransport: fixtures.MappingTransport{ + "POST /oidc/v1/token": { + Status: http.StatusOK, + ExpectedHeaders: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + Response: map[string]string{ + "token_type": "access-token", + "access_token": "test-auth-token", + "refresh_token": "refresh", + "expires_on": "0", + }, + }, + }, + expectedAudience: "https://host.com/oidc/v1/token", + idToken: "id-token-42", + wantToken: "test-auth-token", + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + p := &mockIdTokenProvider{ + idToken: tc.idToken, + err: tc.tokenProviderError, + } + + cfg := DatabricksOIDCTokenSourceConfig{ + ClientID: tc.clientID, + AccountID: tc.accountID, + Host: tc.host, + TokenEndpointProvider: tc.oidcEndpointProvider, + Audience: tc.tokenAudience, + IdTokenSource: p, + } + + ts := NewDatabricksOIDCTokenSource(cfg) + if tc.httpTransport != nil { + ts.(*databricksOIDCTokenSource).cfg.TokenEndpointProvider = func(ctx context.Context) (*u2m.OAuthAuthorizationServer, 
error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + } + } + + ctx := context.Background() + if tc.httpTransport != nil { + ctx = context.WithValue(ctx, oauth2.HTTPClient, &http.Client{ + Transport: tc.httpTransport, + }) + } + + token, err := ts.Token(ctx) + if tc.wantErrPrefix == nil && err != nil { + t.Errorf("Token(ctx): got error %q, want none", err) + } + if tc.wantErrPrefix != nil && !hasPrefix(err, *tc.wantErrPrefix) { + t.Errorf("Token(ctx): got error %q, want error with prefix %q", err, *tc.wantErrPrefix) + } + if tc.expectedAudience != p.audience { + t.Errorf("mockTokenProvider: got audience %s, want %s", p.audience, tc.expectedAudience) + } + tokenValue := "" + if token != nil { + tokenValue = token.AccessToken + } + if diff := cmp.Diff(tc.wantToken, tokenValue); diff != "" { + t.Errorf("Authenticate(): mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git a/config/auth_default.go b/config/auth_default.go index 2170f3d2b..a49757d02 100644 --- a/config/auth_default.go +++ b/config/auth_default.go @@ -9,22 +9,60 @@ import ( "github.com/databricks/databricks-sdk-go/logger" ) -var authProviders = []CredentialsStrategy{ - PatCredentials{}, - BasicCredentials{}, - M2mCredentials{}, - DatabricksCliCredentials, - MetadataServiceCredentials{}, - - // Attempt to configure auth from most specific to most generic (the Azure CLI). 
- AzureGithubOIDCCredentials{}, - AzureMsiCredentials{}, - AzureClientSecretCredentials{}, - AzureCliCredentials{}, +// Constructs all Databricks OIDC Credentials Strategies +func buildOidcTokenCredentialStrategies(cfg *Config) []CredentialsStrategy { + type namedIdTokenSource struct { + name string + tokenSource IDTokenSource + } + idTokenSources := []namedIdTokenSource{ + { + name: "github-oidc", + tokenSource: &githubIDTokenSource{ + actionsIDTokenRequestURL: cfg.ActionsIDTokenRequestURL, + actionsIDTokenRequestToken: cfg.ActionsIDTokenRequestToken, + refreshClient: cfg.refreshClient, + }, + }, + // Add new providers at the end of the list + } + strategies := []CredentialsStrategy{} + for _, idTokenSource := range idTokenSources { + oidcConfig := DatabricksOIDCTokenSourceConfig{ + ClientID: cfg.ClientID, + Host: cfg.CanonicalHostName(), + TokenEndpointProvider: cfg.getOidcEndpoints, + Audience: cfg.TokenAudience, + IdTokenSource: idTokenSource.tokenSource, + } + if cfg.IsAccountClient() { + oidcConfig.AccountID = cfg.AccountID + } + tokenSource := NewDatabricksOIDCTokenSource(oidcConfig) + strategies = append(strategies, NewTokenSourceStrategy(idTokenSource.name, tokenSource)) + } + return strategies +} - // Attempt to configure auth from most specific to most generic (Google Application Default Credentials). - GoogleCredentials{}, - GoogleDefaultCredentials{}, +func buildDefaultStrategies(cfg *Config) []CredentialsStrategy { + strategies := []CredentialsStrategy{} + strategies = append(strategies, + PatCredentials{}, + BasicCredentials{}, + M2mCredentials{}, + DatabricksCliCredentials, + MetadataServiceCredentials{}) + strategies = append(strategies, buildOidcTokenCredentialStrategies(cfg)...) + strategies = append(strategies, + // Attempt to configure auth from most specific to most generic (the Azure CLI). 
+ AzureGithubOIDCCredentials{}, + AzureMsiCredentials{}, + AzureClientSecretCredentials{}, + AzureCliCredentials{}, + // Attempt to configure auth from most specific to most generic (Google Application Default Credentials). + GoogleCredentials{}, + GoogleDefaultCredentials{}) + return strategies } type DefaultCredentials struct { @@ -45,7 +83,11 @@ var errorMessage = fmt.Sprintf("cannot configure default credentials, please che var ErrCannotConfigureAuth = errors.New(errorMessage) func (c *DefaultCredentials) Configure(ctx context.Context, cfg *Config) (credentials.CredentialsProvider, error) { - for _, p := range authProviders { + err := cfg.EnsureResolved() + if err != nil { + return nil, err + } + for _, p := range buildDefaultStrategies(cfg) { if cfg.AuthType != "" && p.Name() != cfg.AuthType { // ignore other auth types if one is explicitly enforced logger.Infof(ctx, "Ignoring %s auth, because %s is preferred", p.Name(), cfg.AuthType) diff --git a/config/config.go b/config/config.go index 6c14f0a27..6346d6cf8 100644 --- a/config/config.go +++ b/config/config.go @@ -134,6 +134,9 @@ type Config struct { // Environment override to return when resolving the current environment. DatabricksEnvironment *environment.DatabricksEnvironment + // When using Workload Identity Federation, the audience to specify when fetching an ID token from the ID token supplier. + TokenAudience string `name:"audience" env:"DATABRICKS_TOKEN_AUDIENCE" auth:"-"` + Loaders []Loader // marker for configuration resolving diff --git a/config/id_token_source_github_oidc.go b/config/id_token_source_github_oidc.go new file mode 100644 index 000000000..6f4048226 --- /dev/null +++ b/config/id_token_source_github_oidc.go @@ -0,0 +1,45 @@ +package config + +import ( + "context" + "errors" + "fmt" + + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/logger" +) + +// githubIDTokenSource retrieves JWT Tokens from Github Actions. 
+type githubIDTokenSource struct { + actionsIDTokenRequestURL string + actionsIDTokenRequestToken string + refreshClient *httpclient.ApiClient +} + +// IDToken returns a JWT Token for the specified audience. It will return +// an error if not running in GitHub Actions. +func (g *githubIDTokenSource) IDToken(ctx context.Context, audience string) (*IDToken, error) { + if g.actionsIDTokenRequestURL == "" { + logger.Debugf(ctx, "Missing ActionsIDTokenRequestURL, likely not calling from a Github action") + return nil, errors.New("missing ActionsIDTokenRequestURL") + } + if g.actionsIDTokenRequestToken == "" { + logger.Debugf(ctx, "Missing ActionsIDTokenRequestToken, likely not calling from a Github action") + return nil, errors.New("missing ActionsIDTokenRequestToken") + } + + resp := &IDToken{} + requestUrl := g.actionsIDTokenRequestURL + if audience != "" { + requestUrl = fmt.Sprintf("%s&audience=%s", requestUrl, audience) + } + err := g.refreshClient.Do(ctx, "GET", requestUrl, + httpclient.WithRequestHeader("Authorization", fmt.Sprintf("Bearer %s", g.actionsIDTokenRequestToken)), + httpclient.WithResponseUnmarshal(resp), + ) + if err != nil { + return nil, fmt.Errorf("failed to request ID token from %s: %w", g.actionsIDTokenRequestURL, err) + } + + return resp, nil +} diff --git a/config/id_token_source_github_oidc_test.go b/config/id_token_source_github_oidc_test.go new file mode 100644 index 000000000..58a1bbc2b --- /dev/null +++ b/config/id_token_source_github_oidc_test.go @@ -0,0 +1,91 @@ +package config + +import ( + "context" + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/httpclient" + "github.com/databricks/databricks-sdk-go/httpclient/fixtures" + "github.com/google/go-cmp/cmp" +) + +func TestGithubIDTokenSource(t *testing.T) { + testCases := []struct { + desc string + tokenRequestUrl string + tokenRequestToken string + audience string + httpTransport http.RoundTripper + wantToken *IDToken + wantErrPrefix *string + }{ + { + desc: 
"missing request token url", + tokenRequestToken: "token-1337", + wantErrPrefix: errPrefix("missing ActionsIDTokenRequestURL"), + }, + { + desc: "missing request token token", + tokenRequestUrl: "http://endpoint.com/test?version=1", + wantErrPrefix: errPrefix("missing ActionsIDTokenRequestToken"), + }, + { + desc: "error getting token", + tokenRequestToken: "token-1337", + tokenRequestUrl: "http://endpoint.com/test?version=1", + httpTransport: fixtures.MappingTransport{ + "GET /test?version=1": { + Status: http.StatusInternalServerError, + ExpectedHeaders: map[string]string{ + "Authorization": "Bearer token-1337", + "Accept": "application/json", + }, + }, + }, + wantErrPrefix: errPrefix("failed to request ID token from"), + }, + { + desc: "success", + tokenRequestToken: "token-1337", + tokenRequestUrl: "http://endpoint.com/test?version=1", + httpTransport: fixtures.MappingTransport{ + "GET /test?version=1": { + Status: http.StatusOK, + ExpectedHeaders: map[string]string{ + "Authorization": "Bearer token-1337", + "Accept": "application/json", + }, + Response: `{"value": "id-token-42"}`, + }, + }, + wantToken: &IDToken{ + Value: "id-token-42", + }, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + cli := httpclient.NewApiClient(httpclient.ClientConfig{ + Transport: tc.httpTransport, + }) + p := &githubIDTokenSource{ + actionsIDTokenRequestURL: tc.tokenRequestUrl, + actionsIDTokenRequestToken: tc.tokenRequestToken, + refreshClient: cli, + } + token, gotErr := p.IDToken(context.Background(), tc.audience) + + if tc.wantErrPrefix == nil && gotErr != nil { + t.Errorf("Authenticate(): got error %q, want none", gotErr) + } + if tc.wantErrPrefix != nil && !hasPrefix(gotErr, *tc.wantErrPrefix) { + t.Errorf("Authenticate(): got error %q, want error with prefix %q", gotErr, *tc.wantErrPrefix) + } + if diff := cmp.Diff(tc.wantToken, token); diff != "" { + t.Errorf("Authenticate(): mismatch (-want +got):\n%s", diff) + } + }) + } +} diff --git 
a/config/oauth_visitors.go b/config/oauth_visitors.go index fc7a3d153..69fadc03f 100644 --- a/config/oauth_visitors.go +++ b/config/oauth_visitors.go @@ -35,7 +35,12 @@ func serviceToServiceVisitor(primary, secondary oauth2.TokenSource, secondaryHea // The same as serviceToServiceVisitor, but without a secondary token source. func refreshableVisitor(inner oauth2.TokenSource) func(r *http.Request) error { - cts := auth.NewCachedTokenSource(authconv.AuthTokenSource(inner)) + return refreshableAuthVisitor(authconv.AuthTokenSource(inner)) +} + +// The same as serviceToServiceVisitor, but without a secondary token source. +func refreshableAuthVisitor(inner auth.TokenSource) func(r *http.Request) error { + cts := auth.NewCachedTokenSource(inner) return func(r *http.Request) error { inner, err := cts.Token(context.Background()) if err != nil { diff --git a/config/token_source_strategy.go b/config/token_source_strategy.go new file mode 100644 index 000000000..fd5d995ce --- /dev/null +++ b/config/token_source_strategy.go @@ -0,0 +1,62 @@ +package config + +import ( + "context" + "fmt" + + "github.com/databricks/databricks-sdk-go/config/credentials" + "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/authconv" + "github.com/databricks/databricks-sdk-go/logger" +) + +// IDToken is a token that can be exchanged for a an access token. +// Value is the token string. +type IDToken struct { + Value string +} + +// IDTokenSource is anything that returns an IDToken given an audience. +type IDTokenSource interface { + // Function to get the token + IDToken(ctx context.Context, audience string) (*IDToken, error) +} + +// Creates a CredentialsStrategy from a TokenSource. 
+func NewTokenSourceStrategy( + name string, + tokenSource auth.TokenSource, +) CredentialsStrategy { + return &tokenSourceStrategy{ + name: name, + tokenSource: tokenSource, + } +} + +// tokenSourceStrategy is wrapper on a auth.TokenSource which converts it into a CredentialsStrategy +type tokenSourceStrategy struct { + tokenSource auth.TokenSource + name string +} + +// Configure implements [CredentialsStrategy.Configure]. +func (t *tokenSourceStrategy) Configure(ctx context.Context, cfg *Config) (credentials.CredentialsProvider, error) { + + // If we cannot get a token, skip this CredentialsStrategy. + // We don't want to fail here because it's possible that the supplier is enabled + // without the user action. For instance, jobs running in GitHub will have + // OIDC environment variables added automatically + cached := auth.NewCachedTokenSource(t.tokenSource) + if _, err := cached.Token(ctx); err != nil { + logger.Debugf(ctx, fmt.Sprintf("Skipping %s due to error: %v", t.name, err)) + return nil, nil + } + + visitor := refreshableAuthVisitor(cached) + return credentials.NewOAuthCredentialsProvider(visitor, authconv.OAuth2TokenSource(cached).Token), nil +} + +// Name implements [CredentialsStrategy.Name]. 
+func (t *tokenSourceStrategy) Name() string { + return t.name +} diff --git a/config/token_source_strategy_test.go b/config/token_source_strategy_test.go new file mode 100644 index 000000000..48ecc38c7 --- /dev/null +++ b/config/token_source_strategy_test.go @@ -0,0 +1,69 @@ +package config + +import ( + "context" + "errors" + "net/http" + "testing" + + "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "github.com/google/go-cmp/cmp" + "golang.org/x/oauth2" +) + +func TestDatabricksTokenSourceStrategy(t *testing.T) { + testCases := []struct { + desc string + token *oauth2.Token + tokenSourceError error + wantHeaders http.Header + }{ + { + desc: "token source error skips", + tokenSourceError: errors.New("random error"), + }, + { + desc: "token source error skips", + token: &oauth2.Token{ + AccessToken: "token-123", + }, + wantHeaders: http.Header{"Authorization": {"Bearer token-123"}}, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + strat := &tokenSourceStrategy{ + name: "github-oidc", + tokenSource: auth.TokenSourceFn(func(_ context.Context) (*oauth2.Token, error) { + return tc.token, tc.tokenSourceError + }), + } + provider, err := strat.Configure(context.Background(), &Config{}) + if tc.tokenSourceError == nil && provider == nil { + t.Error("Provider expected to not be nil, but it is") + } + if tc.tokenSourceError != nil && provider != nil { + t.Error("A failure in the TokenSource should cause the provider to be nil, but it's not") + } + if err != nil { + t.Errorf("Configure() got error %q, want none", err) + } + + if provider != nil { + req, _ := http.NewRequest("GET", "http://localhost", nil) + + gotErr := provider.SetHeaders(req) + + if gotErr != nil { + t.Errorf("SetHeaders(): got error %q, want none", gotErr) + } + if diff := cmp.Diff(tc.wantHeaders, req.Header); diff != "" { + t.Errorf("Authenticate(): mismatch (-want +got):\n%s", diff) + } + + } + + }) + } +} diff --git a/internal/auth_test.go 
b/internal/auth_test.go new file mode 100644 index 000000000..ef12d40a5 --- /dev/null +++ b/internal/auth_test.go @@ -0,0 +1,153 @@ +package internal + +import ( + "strconv" + "testing" + + "github.com/databricks/databricks-sdk-go" + "github.com/databricks/databricks-sdk-go/apierr" + "github.com/databricks/databricks-sdk-go/service/iam" + "github.com/databricks/databricks-sdk-go/service/oauth2" + "github.com/stretchr/testify/require" +) + +func TestUcAccWifAuth(t *testing.T) { + // This test cannot be run locally. It can only be run from GitHub Workflows. + _ = GetEnvOrSkipTest(t, "ACTIONS_ID_TOKEN_REQUEST_URL") + ctx, a := ucacctTest(t) + + // Create SP with access to the workspace + sp, err := a.ServicePrincipals.Create(ctx, iam.ServicePrincipal{ + Active: true, + DisplayName: RandomName("go-sdk-sp-"), + Roles: []iam.ComplexValue{ + {Value: "account_admin"}, // Assigning account-level admin role + }, + }) + require.NoError(t, err) + t.Cleanup(func() { + err := a.ServicePrincipals.Delete(ctx, iam.DeleteAccountServicePrincipalRequest{Id: sp.Id}) + require.True(t, err == nil || apierr.IsMissing(err)) + }) + + spId, err := strconv.ParseInt(sp.Id, 10, 64) + require.NoError(t, err) + + // Setup Federation Policy + p, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{ + Policy: &oauth2.FederationPolicy{ + OidcPolicy: &oauth2.OidcFederationPolicy{ + Issuer: "https://token.actions.githubusercontent.com", + Audiences: []string{ + "https://github.com/databricks-eng", + }, + Subject: "repo:databricks-eng/eng-dev-ecosystem:environment:integration-tests", + }, + }, + ServicePrincipalId: spId, + }) + + require.NoError(t, err) + t.Cleanup(func() { + err := a.ServicePrincipalFederationPolicy.Delete(ctx, oauth2.DeleteServicePrincipalFederationPolicyRequest{ + ServicePrincipalId: spId, + PolicyId: p.Uid, + }) + require.True(t, err == nil || apierr.IsMissing(err)) + }) + + // Test Workspace Identity Federation at Account Level 
+ + accCfg := &databricks.Config{ + Host: a.Config.Host, + AccountID: a.Config.AccountID, + ClientID: sp.ApplicationId, + AuthType: "github-oidc", + TokenAudience: "https://github.com/databricks-eng", + } + + wifAccClient, err := databricks.NewAccountClient(accCfg) + + require.NoError(t, err) + it := wifAccClient.Groups.List(ctx, iam.ListAccountGroupsRequest{}) + _, err = it.Next(ctx) + require.NoError(t, err) + +} + +func TestUcAccWifAuthWorkspace(t *testing.T) { + // This test cannot be run locally. It can only be run from GitHub Workflows. + _ = GetEnvOrSkipTest(t, "ACTIONS_ID_TOKEN_REQUEST_URL") + ctx, a := ucacctTest(t) + + workspaceIdEnvVar := GetEnvOrSkipTest(t, "TEST_WORKSPACE_ID") + workspaceId, err := strconv.ParseInt(workspaceIdEnvVar, 10, 64) + require.NoError(t, err) + + workspaceUrl := GetEnvOrSkipTest(t, "TEST_WORKSPACE_URL") + + // Create SP with access to the workspace + sp, err := a.ServicePrincipals.Create(ctx, iam.ServicePrincipal{ + Active: true, + DisplayName: RandomName("go-sdk-sp-"), + }) + require.NoError(t, err) + t.Cleanup(func() { + err := a.ServicePrincipals.Delete(ctx, iam.DeleteAccountServicePrincipalRequest{Id: sp.Id}) + require.True(t, err == nil || apierr.IsMissing(err)) + }) + + spId, err := strconv.ParseInt(sp.Id, 10, 64) + require.NoError(t, err) + + _, err = a.WorkspaceAssignment.Update(ctx, iam.UpdateWorkspaceAssignments{ + WorkspaceId: workspaceId, + PrincipalId: spId, + Permissions: []iam.WorkspacePermission{iam.WorkspacePermissionAdmin}, + }) + + require.NoError(t, err) + t.Cleanup(func() { + err := a.WorkspaceAssignment.Delete(ctx, iam.DeleteWorkspaceAssignmentRequest{ + PrincipalId: spId, + WorkspaceId: workspaceId, + }) + require.True(t, err == nil || apierr.IsMissing(err)) + }) + + // Setup Federation Policy + p, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{ + Policy: &oauth2.FederationPolicy{ + OidcPolicy: &oauth2.OidcFederationPolicy{ + Issuer: 
"https://token.actions.githubusercontent.com", + Audiences: []string{ + "https://github.com/databricks-eng", + }, + Subject: "repo:databricks-eng/eng-dev-ecosystem:environment:integration-tests", + }, + }, + ServicePrincipalId: spId, + }) + + require.NoError(t, err) + t.Cleanup(func() { + err := a.ServicePrincipalFederationPolicy.Delete(ctx, oauth2.DeleteServicePrincipalFederationPolicyRequest{ + ServicePrincipalId: spId, + PolicyId: p.Uid, + }) + require.True(t, err == nil || apierr.IsMissing(err)) + }) + + wsCfg := &databricks.Config{ + Host: workspaceUrl, + ClientID: sp.ApplicationId, + AuthType: "github-oidc", + TokenAudience: "https://github.com/databricks-eng", + } + + wifWsClient, err := databricks.NewWorkspaceClient(wsCfg) + + require.NoError(t, err) + _, err = wifWsClient.CurrentUser.Me(ctx) + require.NoError(t, err) +} From 07873068b2bf42e43da729fb6c8546a9abb115a6 Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Tue, 29 Apr 2025 11:37:02 +0000 Subject: [PATCH 45/54] [Release] Release v0.65.0 ## Release v0.65.0 ### New Features and Improvements * Introduce support for Databricks Workload Identity Federation in GitHub workflows ([1177](https://github.com/databricks/databricks-sdk-go/pull/1177)). See README.md for instructions. * [Breaking] Users running their worklows in GitHub Actions, which use Cloud native authentication and also have a `DATABRICKS_CLIENT_ID` and `DATABRICKS_HOST` environment variables set may see their authentication start failing due to the order in which the SDK tries different authentication methods. In such case, the `DATABRICKS_AUTH_TYPE` environment variable must be set to match the previously used authentication method. 
--- .release_metadata.json | 2 +- CHANGELOG.md | 10 ++++++++++ NEXT_CHANGELOG.md | 7 +------ version/version.go | 2 +- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 008a2f2a7..48af521ab 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-04-24 18:25:56+0000" + "timestamp": "2025-04-29 11:36:58+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 36644701b..b198ed3e6 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Version changelog +## Release v0.65.0 + +### New Features and Improvements +* Introduce support for Databricks Workload Identity Federation in GitHub workflows ([1177](https://github.com/databricks/databricks-sdk-go/pull/1177)). + See README.md for instructions. +* [Breaking] Users running their worklows in GitHub Actions, which use Cloud native authentication and also have a `DATABRICKS_CLIENT_ID` and `DATABRICKS_HOST` + environment variables set may see their authentication start failing due to the order in which the SDK tries different authentication methods. + In such case, the `DATABRICKS_AUTH_TYPE` environment variable must be set to match the previously used authentication method. + + ## Release v0.64.0 ### New Features and Improvements diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 7e31bcafc..ee2122a8a 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,13 +1,8 @@ # NEXT CHANGELOG -## Release v0.65.0 +## Release v0.66.0 ### New Features and Improvements -* Introduce support for Databricks Workload Identity Federation in GitHub workflows ([1177](https://github.com/databricks/databricks-sdk-go/pull/1177)). - See README.md for instructions. 
-* [Breaking] Users running their worklows in GitHub Actions, which use Cloud native authentication and also have a `DATABRICKS_CLIENT_ID` and `DATABRICKS_HOST` - environment variables set may see their authentication start failing due to the order in which the SDK tries different authentication methods. - In such case, the `DATABRICKS_AUTH_TYPE` environment variable must be set to match the previously used authentication method. ### Bug Fixes diff --git a/version/version.go b/version/version.go index ad717b25d..023937540 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.64.0" +const Version = "0.65.0" From eab964f3172844ba6280b2eb57b5562a16bf16fd Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 09:59:22 +0200 Subject: [PATCH 46/54] Tolerate trailing slash (#1211) ## What changes are proposed in this pull request? A recent refactor of U2M logic to the SDK introduced a small bug where a hostname with a trailing slash would be rejected when using `databricks auth login`, e.g. `databricks auth login --host https://myworkspace.cloud.databricks.com/`. This happens because `AccountOAuthArgument` and `WorkspaceOAuthArgument` both validate that the provided host does not have a trailing slash. Instead, we can tolerate hostnames ending with a slash by using the `CanonicalHostName()` method of `Config` to normalize the hostname, removing the unneeded trailing slash before passing the normalized hostname to U2M auth components. I have made a similar fix to `getOidcEndpoints`. ## How is this tested? Unit tests are added to verify that `getOauthArgument` on `Config` works with normalized hostnames, and same for `getOidcEndpoints`. 
--- NEXT_CHANGELOG.md | 1 + config/config.go | 10 +-- config/config_test.go | 150 +++++++++++++++++++++++++++++++++++------- 3 files changed, 133 insertions(+), 28 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index ee2122a8a..c5cdbb641 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -5,6 +5,7 @@ ### New Features and Improvements ### Bug Fixes +* Tolerate trailing slashes in hostnames in `Config`. ### Documentation diff --git a/config/config.go b/config/config.go index 6346d6cf8..c81f03610 100644 --- a/config/config.go +++ b/config/config.go @@ -461,15 +461,17 @@ func (c *Config) getOidcEndpoints(ctx context.Context) (*u2m.OAuthAuthorizationS oauthClient := &u2m.BasicOAuthEndpointSupplier{ Client: c.refreshClient, } + host := c.CanonicalHostName() if c.IsAccountClient() { - return oauthClient.GetAccountOAuthEndpoints(ctx, c.Host, c.AccountID) + return oauthClient.GetAccountOAuthEndpoints(ctx, host, c.AccountID) } - return oauthClient.GetWorkspaceOAuthEndpoints(ctx, c.Host) + return oauthClient.GetWorkspaceOAuthEndpoints(ctx, host) } func (c *Config) getOAuthArgument() (u2m.OAuthArgument, error) { + host := c.CanonicalHostName() if c.IsAccountClient() { - return u2m.NewBasicAccountOAuthArgument(c.Host, c.AccountID) + return u2m.NewBasicAccountOAuthArgument(host, c.AccountID) } - return u2m.NewBasicWorkspaceOAuthArgument(c.Host) + return u2m.NewBasicWorkspaceOAuthArgument(host) } diff --git a/config/config_test.go b/config/config_test.go index 5a5ac7041..7b117bdbd 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -70,34 +70,136 @@ func TestAuthenticate_InvalidHostSet(t *testing.T) { } func TestConfig_getOidcEndpoints_account(t *testing.T) { - c := &Config{ - Host: "https://accounts.cloud.databricks.com", - AccountID: "abc", + tests := []struct { + name string + host string + accountID string + }{ + { + name: "without trailing slash", + host: "https://accounts.cloud.databricks.com", + accountID: "abc", + }, + { + name: "with 
trailing slash", + host: "https://accounts.cloud.databricks.com/", + accountID: "abc", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Config{ + Host: tt.host, + AccountID: tt.accountID, + } + got, err := c.getOidcEndpoints(context.Background()) + assert.NoError(t, err) + assert.Equal(t, &u2m.OAuthAuthorizationServer{ + AuthorizationEndpoint: "https://accounts.cloud.databricks.com/oidc/accounts/abc/v1/authorize", + TokenEndpoint: "https://accounts.cloud.databricks.com/oidc/accounts/abc/v1/token", + }, got) + }) } - got, err := c.getOidcEndpoints(context.Background()) - assert.NoError(t, err) - assert.Equal(t, &u2m.OAuthAuthorizationServer{ - AuthorizationEndpoint: "https://accounts.cloud.databricks.com/oidc/accounts/abc/v1/authorize", - TokenEndpoint: "https://accounts.cloud.databricks.com/oidc/accounts/abc/v1/token", - }, got) } func TestConfig_getOidcEndpoints_workspace(t *testing.T) { - c := &Config{ - Host: "https://myworkspace.cloud.databricks.com", - HTTPTransport: fixtures.SliceTransport{ - { - Method: "GET", - Resource: "/oidc/.well-known/oauth-authorization-server", - Status: 200, - Response: `{"authorization_endpoint": "https://myworkspace.cloud.databricks.com/oidc/v1/authorize", "token_endpoint": "https://myworkspace.cloud.databricks.com/oidc/v1/token"}`, - }, + tests := []struct { + name string + host string + }{ + { + name: "without trailing slash", + host: "https://myworkspace.cloud.databricks.com", + }, + { + name: "with trailing slash", + host: "https://myworkspace.cloud.databricks.com/", }, } - got, err := c.getOidcEndpoints(context.Background()) - assert.NoError(t, err) - assert.Equal(t, &u2m.OAuthAuthorizationServer{ - AuthorizationEndpoint: "https://myworkspace.cloud.databricks.com/oidc/v1/authorize", - TokenEndpoint: "https://myworkspace.cloud.databricks.com/oidc/v1/token", - }, got) + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Config{ + Host: tt.host, + HTTPTransport: 
fixtures.SliceTransport{ + { + Method: "GET", + Resource: "/oidc/.well-known/oauth-authorization-server", + Status: 200, + Response: `{"authorization_endpoint": "https://myworkspace.cloud.databricks.com/oidc/v1/authorize", "token_endpoint": "https://myworkspace.cloud.databricks.com/oidc/v1/token"}`, + }, + }, + } + got, err := c.getOidcEndpoints(context.Background()) + assert.NoError(t, err) + assert.Equal(t, &u2m.OAuthAuthorizationServer{ + AuthorizationEndpoint: "https://myworkspace.cloud.databricks.com/oidc/v1/authorize", + TokenEndpoint: "https://myworkspace.cloud.databricks.com/oidc/v1/token", + }, got) + }) + } +} + +func TestConfig_getOAuthArgument_account(t *testing.T) { + tests := []struct { + name string + host string + accountID string + }{ + { + name: "without trailing slash", + host: "https://accounts.cloud.databricks.com", + accountID: "abc", + }, + { + name: "with trailing slash", + host: "https://accounts.cloud.databricks.com/", + accountID: "abc", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Config{ + Host: tt.host, + AccountID: tt.accountID, + } + rawGot, err := c.getOAuthArgument() + assert.NoError(t, err) + got, ok := rawGot.(u2m.BasicAccountOAuthArgument) + assert.True(t, ok) + assert.Equal(t, "https://accounts.cloud.databricks.com", got.GetAccountHost()) + assert.Equal(t, "abc", got.GetAccountId()) + }) + } +} + +func TestConfig_getOAuthArgument_workspace(t *testing.T) { + tests := []struct { + name string + host string + }{ + { + name: "without trailing slash", + host: "https://myworkspace.cloud.databricks.com", + }, + { + name: "with trailing slash", + host: "https://myworkspace.cloud.databricks.com/", + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &Config{ + Host: tt.host, + } + rawGot, err := c.getOAuthArgument() + assert.NoError(t, err) + got, ok := rawGot.(u2m.BasicWorkspaceOAuthArgument) + assert.True(t, ok) + assert.Equal(t, 
"https://myworkspace.cloud.databricks.com", got.GetWorkspaceHost()) + }) + } } From cedae03775b56d60b96f5d4415323ae2a2c340db Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Wed, 30 Apr 2025 13:27:10 +0200 Subject: [PATCH 47/54] Bump API specification to 30 Apr 2025 (#1212) ## What changes are proposed in this pull request? Update the API definition of Databricks to the latest version as of 30 April 2025. ## How is this tested? Covered by existing tests. --- .codegen/_openapi_sha | 2 +- NEXT_CHANGELOG.md | 31 + account_client.go | 9 +- experimental/mocks/mock_workspace_client.go | 9 + .../mock_network_connectivity_interface.go | 59 ++ .../service/sql/mock_alerts_v2_interface.go | 595 ++++++++++++++++++ internal/auth_test.go | 4 +- service/apps/model.go | 6 +- service/billing/model.go | 2 +- service/catalog/model.go | 28 +- service/cleanrooms/model.go | 8 +- service/compute/model.go | 17 +- service/dashboards/model.go | 10 +- service/jobs/model.go | 19 +- service/oauth2/model.go | 8 +- service/pkg.go | 5 +- service/serving/model.go | 51 +- service/settings/api.go | 35 +- service/settings/impl.go | 18 +- service/settings/interface.go | 31 +- service/settings/model.go | 239 +++---- service/sql/api.go | 157 ++++- service/sql/impl.go | 92 +++ service/sql/interface.go | 33 + service/sql/model.go | 453 ++++++++++++- workspace_client.go | 4 + 26 files changed, 1728 insertions(+), 197 deletions(-) create mode 100644 experimental/mocks/service/sql/mock_alerts_v2_interface.go diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 8cd956362..e7f752fb5 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -06a18b97d7996d6cd8dd88bfdb0f2c2792739e46 \ No newline at end of file +ce962ccd0a078a5a9d89494fe38d237ce377d5f3 \ No newline at end of file diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index c5cdbb641..0c210415e 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -12,3 +12,34 @@ ### Internal Changes ### API Changes +* Added 
[w.AlertsV2](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API) workspace-level service. +* Added `UpdateNccAzurePrivateEndpointRulePublic` method for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service. +* Added `CreatedAt`, `CreatedBy` and `MetastoreId` fields for [catalog.SetArtifactAllowlist](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#SetArtifactAllowlist). +* [Breaking] Added `NetworkConnectivityConfig` field for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). +* [Breaking] Added `PrivateEndpointRule` field for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). +* Added `DomainNames` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule). +* Added `AutoResolveDisplayName` field for [sql.CreateAlertRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateAlertRequest). +* Added `AutoResolveDisplayName` field for [sql.CreateQueryRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateQueryRequest). +* Added `CreateCleanRoom`, `ExecuteCleanRoomTask` and `ModifyCleanRoom` enum values for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). +* Added `DnsResolutionError` and `GcpDeniedByOrgPolicy` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). 
+* Added `Expired` enum value for [settings.NccAzurePrivateEndpointRuleConnectionState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleConnectionState). +* [Breaking] Changed `CreateNetworkConnectivityConfiguration` and `CreatePrivateEndpointRule` methods for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service with new required argument order. +* [Breaking] Changed `WorkloadSize` field for [serving.ServedModelInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInput) to type `string`. +* [Breaking] Changed `GroupId` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule) to type `string`. +* [Breaking] Changed `TargetServices` field for [settings.NccAzureServiceEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzureServiceEndpointRule) to type [settings.EgressResourceTypeList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EgressResourceTypeList). +* [Breaking] Removed `Name` and `Region` fields for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). +* [Breaking] Removed `GroupId` and `ResourceId` fields for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). +* [Breaking] Removed `Large`, `Medium` and `Small` enum values for [serving.ServedModelInputWorkloadSize](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInputWorkloadSize). 
+* [Breaking] Removed `Blob`, `Dfs`, `MysqlServer` and `SqlServer` enum values for [settings.NccAzurePrivateEndpointRuleGroupId](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleGroupId). +* [Breaking] Field `AppDeployment` of `CreateAppDeploymentRequest` is changed from `*AppDeployment` to `AppDeployment`. +* [Breaking] Field `App` of `CreateAppRequest` is changed from `*App` to `App`. +* [Breaking] Field `App` of `UpdateAppRequest` is changed from `*App` to `App`. +* [Breaking] Field `BudgetPolicy` of `UpdateBudgetPolicyRequest` is changed from `*BudgetPolicy` to `BudgetPolicy`. +* [Breaking] Field `OnlineTable` of `CreateOnlineTableRequest` is changed from `*OnlineTable` to `OnlineTable`. +* [Breaking] Field `CleanRoomAsset` of `CreateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. +* [Breaking] Field `CleanRoom` of `CreateCleanRoomRequest` is changed from `*CleanRoom` to `CleanRoom`. +* [Breaking] Field `CleanRoomAsset` of `UpdateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. +* [Breaking] Field `Dashboard` of `CreateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. +* [Breaking] Field `Schedule` of `CreateScheduleRequest` is changed from `*Schedule` to `Schedule`. +* [Breaking] Field `Subscription` of `CreateSubscriptionRequest` is changed from `*Subscription` to `Subscription`. +* [Breaking] Field `Dashboard` of `UpdateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. diff --git a/account_client.go b/account_client.go index fb8aca2e6..d431b9dd1 100755 --- a/account_client.go +++ b/account_client.go @@ -224,7 +224,14 @@ type AccountClient struct { Metastores catalog.AccountMetastoresInterface // These APIs provide configurations for the network connectivity of your - // workspaces for serverless compute resources. + // workspaces for serverless compute resources. 
This API provides stable + // subnets for your workspace so that you can configure your firewalls on + // your Azure Storage accounts to allow access from Databricks. You can also + // use the API to provision private endpoints for Databricks to privately + // connect serverless compute resources to your Azure resources using Azure + // Private Link. See [configure serverless secure connectivity]. + // + // [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security NetworkConnectivity settings.NetworkConnectivityInterface // These APIs manage network configurations for customer-managed VPCs diff --git a/experimental/mocks/mock_workspace_client.go b/experimental/mocks/mock_workspace_client.go index a14f2697e..fc96bd2a4 100755 --- a/experimental/mocks/mock_workspace_client.go +++ b/experimental/mocks/mock_workspace_client.go @@ -45,6 +45,7 @@ func NewMockWorkspaceClient(t interface { AccountAccessControlProxy: iam.NewMockAccountAccessControlProxyInterface(t), Alerts: sql.NewMockAlertsInterface(t), AlertsLegacy: sql.NewMockAlertsLegacyInterface(t), + AlertsV2: sql.NewMockAlertsV2Interface(t), Apps: apps.NewMockAppsInterface(t), ArtifactAllowlists: catalog.NewMockArtifactAllowlistsInterface(t), Catalogs: catalog.NewMockCatalogsInterface(t), @@ -312,6 +313,14 @@ func (m *MockWorkspaceClient) GetMockAlertsLegacyAPI() *sql.MockAlertsLegacyInte return api } +func (m *MockWorkspaceClient) GetMockAlertsV2API() *sql.MockAlertsV2Interface { + api, ok := m.WorkspaceClient.AlertsV2.(*sql.MockAlertsV2Interface) + if !ok { + panic(fmt.Sprintf("expected AlertsV2 to be *sql.MockAlertsV2Interface, actual was %T", m.WorkspaceClient.AlertsV2)) + } + return api +} + func (m *MockWorkspaceClient) GetMockAppsAPI() *apps.MockAppsInterface { api, ok := m.WorkspaceClient.Apps.(*apps.MockAppsInterface) if !ok { diff --git a/experimental/mocks/service/settings/mock_network_connectivity_interface.go 
b/experimental/mocks/service/settings/mock_network_connectivity_interface.go index d0d65089d..52ec9fabc 100644 --- a/experimental/mocks/service/settings/mock_network_connectivity_interface.go +++ b/experimental/mocks/service/settings/mock_network_connectivity_interface.go @@ -867,6 +867,65 @@ func (_c *MockNetworkConnectivityInterface_ListPrivateEndpointRulesByNetworkConn return _c } +// UpdateNccAzurePrivateEndpointRulePublic provides a mock function with given fields: ctx, request +func (_m *MockNetworkConnectivityInterface) UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request settings.UpdateNccAzurePrivateEndpointRulePublicRequest) (*settings.NccAzurePrivateEndpointRule, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateNccAzurePrivateEndpointRulePublic") + } + + var r0 *settings.NccAzurePrivateEndpointRule + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) (*settings.NccAzurePrivateEndpointRule, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) *settings.NccAzurePrivateEndpointRule); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*settings.NccAzurePrivateEndpointRule) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateNccAzurePrivateEndpointRulePublic' +type MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call struct { + *mock.Call +} + +// UpdateNccAzurePrivateEndpointRulePublic is a helper method to define mock.On call +// - ctx 
context.Context +// - request settings.UpdateNccAzurePrivateEndpointRulePublicRequest +func (_e *MockNetworkConnectivityInterface_Expecter) UpdateNccAzurePrivateEndpointRulePublic(ctx interface{}, request interface{}) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + return &MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call{Call: _e.mock.On("UpdateNccAzurePrivateEndpointRulePublic", ctx, request)} +} + +func (_c *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call) Run(run func(ctx context.Context, request settings.UpdateNccAzurePrivateEndpointRulePublicRequest)) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(settings.UpdateNccAzurePrivateEndpointRulePublicRequest)) + }) + return _c +} + +func (_c *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call) Return(_a0 *settings.NccAzurePrivateEndpointRule, _a1 error) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call) RunAndReturn(run func(context.Context, settings.UpdateNccAzurePrivateEndpointRulePublicRequest) (*settings.NccAzurePrivateEndpointRule, error)) *MockNetworkConnectivityInterface_UpdateNccAzurePrivateEndpointRulePublic_Call { + _c.Call.Return(run) + return _c +} + // NewMockNetworkConnectivityInterface creates a new instance of MockNetworkConnectivityInterface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. // The first argument is typically a *testing.T value. 
func NewMockNetworkConnectivityInterface(t interface { diff --git a/experimental/mocks/service/sql/mock_alerts_v2_interface.go b/experimental/mocks/service/sql/mock_alerts_v2_interface.go new file mode 100644 index 000000000..d48e31c99 --- /dev/null +++ b/experimental/mocks/service/sql/mock_alerts_v2_interface.go @@ -0,0 +1,595 @@ +// Code generated by mockery v2.53.2. DO NOT EDIT. + +package sql + +import ( + context "context" + + listing "github.com/databricks/databricks-sdk-go/listing" + mock "github.com/stretchr/testify/mock" + + sql "github.com/databricks/databricks-sdk-go/service/sql" +) + +// MockAlertsV2Interface is an autogenerated mock type for the AlertsV2Interface type +type MockAlertsV2Interface struct { + mock.Mock +} + +type MockAlertsV2Interface_Expecter struct { + mock *mock.Mock +} + +func (_m *MockAlertsV2Interface) EXPECT() *MockAlertsV2Interface_Expecter { + return &MockAlertsV2Interface_Expecter{mock: &_m.Mock} +} + +// CreateAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) CreateAlert(ctx context.Context, request sql.CreateAlertV2Request) (*sql.AlertV2, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for CreateAlert") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.CreateAlertV2Request) (*sql.AlertV2, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.CreateAlertV2Request) *sql.AlertV2); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.CreateAlertV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_CreateAlert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'CreateAlert' +type MockAlertsV2Interface_CreateAlert_Call struct 
{ + *mock.Call +} + +// CreateAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.CreateAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) CreateAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_CreateAlert_Call { + return &MockAlertsV2Interface_CreateAlert_Call{Call: _e.mock.On("CreateAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_CreateAlert_Call) Run(run func(ctx context.Context, request sql.CreateAlertV2Request)) *MockAlertsV2Interface_CreateAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.CreateAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_CreateAlert_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_CreateAlert_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_CreateAlert_Call) RunAndReturn(run func(context.Context, sql.CreateAlertV2Request) (*sql.AlertV2, error)) *MockAlertsV2Interface_CreateAlert_Call { + _c.Call.Return(run) + return _c +} + +// GetAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) GetAlert(ctx context.Context, request sql.GetAlertV2Request) (*sql.AlertV2, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for GetAlert") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.GetAlertV2Request) (*sql.AlertV2, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.GetAlertV2Request) *sql.AlertV2); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.GetAlertV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_GetAlert_Call is a *mock.Call that shadows Run/Return methods with type 
explicit version for method 'GetAlert' +type MockAlertsV2Interface_GetAlert_Call struct { + *mock.Call +} + +// GetAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.GetAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) GetAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_GetAlert_Call { + return &MockAlertsV2Interface_GetAlert_Call{Call: _e.mock.On("GetAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_GetAlert_Call) Run(run func(ctx context.Context, request sql.GetAlertV2Request)) *MockAlertsV2Interface_GetAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.GetAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlert_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_GetAlert_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlert_Call) RunAndReturn(run func(context.Context, sql.GetAlertV2Request) (*sql.AlertV2, error)) *MockAlertsV2Interface_GetAlert_Call { + _c.Call.Return(run) + return _c +} + +// GetAlertById provides a mock function with given fields: ctx, id +func (_m *MockAlertsV2Interface) GetAlertById(ctx context.Context, id string) (*sql.AlertV2, error) { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for GetAlertById") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*sql.AlertV2, error)); ok { + return rf(ctx, id) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *sql.AlertV2); ok { + r0 = rf(ctx, id) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, id) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_GetAlertById_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 
'GetAlertById' +type MockAlertsV2Interface_GetAlertById_Call struct { + *mock.Call +} + +// GetAlertById is a helper method to define mock.On call +// - ctx context.Context +// - id string +func (_e *MockAlertsV2Interface_Expecter) GetAlertById(ctx interface{}, id interface{}) *MockAlertsV2Interface_GetAlertById_Call { + return &MockAlertsV2Interface_GetAlertById_Call{Call: _e.mock.On("GetAlertById", ctx, id)} +} + +func (_c *MockAlertsV2Interface_GetAlertById_Call) Run(run func(ctx context.Context, id string)) *MockAlertsV2Interface_GetAlertById_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlertById_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_GetAlertById_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_GetAlertById_Call) RunAndReturn(run func(context.Context, string) (*sql.AlertV2, error)) *MockAlertsV2Interface_GetAlertById_Call { + _c.Call.Return(run) + return _c +} + +// GetByDisplayName provides a mock function with given fields: ctx, name +func (_m *MockAlertsV2Interface) GetByDisplayName(ctx context.Context, name string) (*sql.ListAlertsV2ResponseAlert, error) { + ret := _m.Called(ctx, name) + + if len(ret) == 0 { + panic("no return value specified for GetByDisplayName") + } + + var r0 *sql.ListAlertsV2ResponseAlert + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, string) (*sql.ListAlertsV2ResponseAlert, error)); ok { + return rf(ctx, name) + } + if rf, ok := ret.Get(0).(func(context.Context, string) *sql.ListAlertsV2ResponseAlert); ok { + r0 = rf(ctx, name) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.ListAlertsV2ResponseAlert) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, string) error); ok { + r1 = rf(ctx, name) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_GetByDisplayName_Call is a *mock.Call that shadows 
Run/Return methods with type explicit version for method 'GetByDisplayName' +type MockAlertsV2Interface_GetByDisplayName_Call struct { + *mock.Call +} + +// GetByDisplayName is a helper method to define mock.On call +// - ctx context.Context +// - name string +func (_e *MockAlertsV2Interface_Expecter) GetByDisplayName(ctx interface{}, name interface{}) *MockAlertsV2Interface_GetByDisplayName_Call { + return &MockAlertsV2Interface_GetByDisplayName_Call{Call: _e.mock.On("GetByDisplayName", ctx, name)} +} + +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Run(run func(ctx context.Context, name string)) *MockAlertsV2Interface_GetByDisplayName_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Return(_a0 *sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_GetByDisplayName_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) RunAndReturn(run func(context.Context, string) (*sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_GetByDisplayName_Call { + _c.Call.Return(run) + return _c +} + +// ListAlerts provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) ListAlerts(ctx context.Context, request sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert] { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAlerts") + } + + var r0 listing.Iterator[sql.ListAlertsV2ResponseAlert] + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(listing.Iterator[sql.ListAlertsV2ResponseAlert]) + } + } + + return r0 +} + +// MockAlertsV2Interface_ListAlerts_Call is a *mock.Call that shadows Run/Return methods with type explicit version 
for method 'ListAlerts' +type MockAlertsV2Interface_ListAlerts_Call struct { + *mock.Call +} + +// ListAlerts is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) ListAlerts(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlerts_Call { + return &MockAlertsV2Interface_ListAlerts_Call{Call: _e.mock.On("ListAlerts", ctx, request)} +} + +func (_c *MockAlertsV2Interface_ListAlerts_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlerts_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlerts_Call) Return(_a0 listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlerts_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { + _c.Call.Return(run) + return _c +} + +// ListAlertsAll provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) ListAlertsAll(ctx context.Context, request sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAlertsAll") + } + + var r0 []sql.ListAlertsV2ResponseAlert + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) []sql.ListAlertsV2ResponseAlert); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).([]sql.ListAlertsV2ResponseAlert) + } + } + + if rf, ok := 
ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_ListAlertsAll_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAlertsAll' +type MockAlertsV2Interface_ListAlertsAll_Call struct { + *mock.Call +} + +// ListAlertsAll is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) ListAlertsAll(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlertsAll_Call { + return &MockAlertsV2Interface_ListAlertsAll_Call{Call: _e.mock.On("ListAlertsAll", ctx, request)} +} + +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlertsAll_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Return(_a0 []sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_ListAlertsAll_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_ListAlertsAll_Call { + _c.Call.Return(run) + return _c +} + +// ListAlertsV2ResponseAlertDisplayNameToIdMap provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request sql.ListAlertsV2Request) (map[string]string, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for ListAlertsV2ResponseAlertDisplayNameToIdMap") + } + + var r0 map[string]string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, 
sql.ListAlertsV2Request) (map[string]string, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) map[string]string); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAlertsV2ResponseAlertDisplayNameToIdMap' +type MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call struct { + *mock.Call +} + +// ListAlertsV2ResponseAlertDisplayNameToIdMap is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + return &MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call{Call: _e.mock.On("ListAlertsV2ResponseAlertDisplayNameToIdMap", ctx, request)} +} + +func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Return(_a0 map[string]string, _a1 error) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) RunAndReturn(run 
func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { + _c.Call.Return(run) + return _c +} + +// TrashAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) TrashAlert(ctx context.Context, request sql.TrashAlertV2Request) error { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for TrashAlert") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, sql.TrashAlertV2Request) error); ok { + r0 = rf(ctx, request) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockAlertsV2Interface_TrashAlert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TrashAlert' +type MockAlertsV2Interface_TrashAlert_Call struct { + *mock.Call +} + +// TrashAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.TrashAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) TrashAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_TrashAlert_Call { + return &MockAlertsV2Interface_TrashAlert_Call{Call: _e.mock.On("TrashAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_TrashAlert_Call) Run(run func(ctx context.Context, request sql.TrashAlertV2Request)) *MockAlertsV2Interface_TrashAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.TrashAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlert_Call) Return(_a0 error) *MockAlertsV2Interface_TrashAlert_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlert_Call) RunAndReturn(run func(context.Context, sql.TrashAlertV2Request) error) *MockAlertsV2Interface_TrashAlert_Call { + _c.Call.Return(run) + return _c +} + +// TrashAlertById provides a mock function with given fields: ctx, id +func (_m *MockAlertsV2Interface) TrashAlertById(ctx 
context.Context, id string) error { + ret := _m.Called(ctx, id) + + if len(ret) == 0 { + panic("no return value specified for TrashAlertById") + } + + var r0 error + if rf, ok := ret.Get(0).(func(context.Context, string) error); ok { + r0 = rf(ctx, id) + } else { + r0 = ret.Error(0) + } + + return r0 +} + +// MockAlertsV2Interface_TrashAlertById_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'TrashAlertById' +type MockAlertsV2Interface_TrashAlertById_Call struct { + *mock.Call +} + +// TrashAlertById is a helper method to define mock.On call +// - ctx context.Context +// - id string +func (_e *MockAlertsV2Interface_Expecter) TrashAlertById(ctx interface{}, id interface{}) *MockAlertsV2Interface_TrashAlertById_Call { + return &MockAlertsV2Interface_TrashAlertById_Call{Call: _e.mock.On("TrashAlertById", ctx, id)} +} + +func (_c *MockAlertsV2Interface_TrashAlertById_Call) Run(run func(ctx context.Context, id string)) *MockAlertsV2Interface_TrashAlertById_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(string)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlertById_Call) Return(_a0 error) *MockAlertsV2Interface_TrashAlertById_Call { + _c.Call.Return(_a0) + return _c +} + +func (_c *MockAlertsV2Interface_TrashAlertById_Call) RunAndReturn(run func(context.Context, string) error) *MockAlertsV2Interface_TrashAlertById_Call { + _c.Call.Return(run) + return _c +} + +// UpdateAlert provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) UpdateAlert(ctx context.Context, request sql.UpdateAlertV2Request) (*sql.AlertV2, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for UpdateAlert") + } + + var r0 *sql.AlertV2 + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.UpdateAlertV2Request) (*sql.AlertV2, error)); ok { + return rf(ctx, request) + } + if rf, ok := 
ret.Get(0).(func(context.Context, sql.UpdateAlertV2Request) *sql.AlertV2); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(*sql.AlertV2) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.UpdateAlertV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_UpdateAlert_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'UpdateAlert' +type MockAlertsV2Interface_UpdateAlert_Call struct { + *mock.Call +} + +// UpdateAlert is a helper method to define mock.On call +// - ctx context.Context +// - request sql.UpdateAlertV2Request +func (_e *MockAlertsV2Interface_Expecter) UpdateAlert(ctx interface{}, request interface{}) *MockAlertsV2Interface_UpdateAlert_Call { + return &MockAlertsV2Interface_UpdateAlert_Call{Call: _e.mock.On("UpdateAlert", ctx, request)} +} + +func (_c *MockAlertsV2Interface_UpdateAlert_Call) Run(run func(ctx context.Context, request sql.UpdateAlertV2Request)) *MockAlertsV2Interface_UpdateAlert_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.UpdateAlertV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_UpdateAlert_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_UpdateAlert_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_UpdateAlert_Call) RunAndReturn(run func(context.Context, sql.UpdateAlertV2Request) (*sql.AlertV2, error)) *MockAlertsV2Interface_UpdateAlert_Call { + _c.Call.Return(run) + return _c +} + +// NewMockAlertsV2Interface creates a new instance of MockAlertsV2Interface. It also registers a testing interface on the mock and a cleanup function to assert the mocks expectations. +// The first argument is typically a *testing.T value. 
+func NewMockAlertsV2Interface(t interface { + mock.TestingT + Cleanup(func()) +}) *MockAlertsV2Interface { + mock := &MockAlertsV2Interface{} + mock.Mock.Test(t) + + t.Cleanup(func() { mock.AssertExpectations(t) }) + + return mock +} diff --git a/internal/auth_test.go b/internal/auth_test.go index ef12d40a5..e4da34c1b 100644 --- a/internal/auth_test.go +++ b/internal/auth_test.go @@ -35,7 +35,7 @@ func TestUcAccWifAuth(t *testing.T) { // Setup Federation Policy p, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{ - Policy: &oauth2.FederationPolicy{ + Policy: oauth2.FederationPolicy{ OidcPolicy: &oauth2.OidcFederationPolicy{ Issuer: "https://token.actions.githubusercontent.com", Audiences: []string{ @@ -117,7 +117,7 @@ func TestUcAccWifAuthWorkspace(t *testing.T) { // Setup Federation Policy p, err := a.ServicePrincipalFederationPolicy.Create(ctx, oauth2.CreateServicePrincipalFederationPolicyRequest{ - Policy: &oauth2.FederationPolicy{ + Policy: oauth2.FederationPolicy{ OidcPolicy: &oauth2.OidcFederationPolicy{ Issuer: "https://token.actions.githubusercontent.com", Audiences: []string{ diff --git a/service/apps/model.go b/service/apps/model.go index 59d2b1a59..e15a6693e 100755 --- a/service/apps/model.go +++ b/service/apps/model.go @@ -611,14 +611,14 @@ func (s ComputeStatus) MarshalJSON() ([]byte, error) { // Create an app deployment type CreateAppDeploymentRequest struct { - AppDeployment *AppDeployment `json:"app_deployment,omitempty"` + AppDeployment AppDeployment `json:"app_deployment"` // The name of the app. AppName string `json:"-" url:"-"` } // Create an app type CreateAppRequest struct { - App *App `json:"app,omitempty"` + App App `json:"app"` // If true, the app will not be started after creation. 
NoCompute bool `json:"-" url:"no_compute,omitempty"` @@ -755,7 +755,7 @@ type StopAppRequest struct { // Update an app type UpdateAppRequest struct { - App *App `json:"app,omitempty"` + App App `json:"app"` // The name of the app. The name must contain only lowercase alphanumeric // characters and hyphens. It must be unique within the workspace. Name string `json:"-" url:"-"` diff --git a/service/billing/model.go b/service/billing/model.go index 3de61625b..4f2106982 100644 --- a/service/billing/model.go +++ b/service/billing/model.go @@ -1115,7 +1115,7 @@ type UpdateBudgetPolicyRequest struct { // BudgetPolicy LimitConfig *LimitConfig `json:"-" url:"limit_config,omitempty"` // Contains the BudgetPolicy details. - Policy *BudgetPolicy `json:"policy,omitempty"` + Policy BudgetPolicy `json:"policy"` // The Id of the policy. This field is generated by Databricks and globally // unique. PolicyId string `json:"-" url:"-"` diff --git a/service/catalog/model.go b/service/catalog/model.go index 9708c90cd..d5bc4af35 100755 --- a/service/catalog/model.go +++ b/service/catalog/model.go @@ -1116,7 +1116,7 @@ func (s CreateMonitor) MarshalJSON() ([]byte, error) { // Create an Online Table type CreateOnlineTableRequest struct { // Online Table information. 
- Table *OnlineTable `json:"table,omitempty"` + Table OnlineTable `json:"table"` } type CreateRegisteredModelRequest struct { @@ -4440,6 +4440,8 @@ const PrivilegeCreate Privilege = `CREATE` const PrivilegeCreateCatalog Privilege = `CREATE_CATALOG` +const PrivilegeCreateCleanRoom Privilege = `CREATE_CLEAN_ROOM` + const PrivilegeCreateConnection Privilege = `CREATE_CONNECTION` const PrivilegeCreateExternalLocation Privilege = `CREATE_EXTERNAL_LOCATION` @@ -4480,12 +4482,16 @@ const PrivilegeCreateVolume Privilege = `CREATE_VOLUME` const PrivilegeExecute Privilege = `EXECUTE` +const PrivilegeExecuteCleanRoomTask Privilege = `EXECUTE_CLEAN_ROOM_TASK` + const PrivilegeManage Privilege = `MANAGE` const PrivilegeManageAllowlist Privilege = `MANAGE_ALLOWLIST` const PrivilegeModify Privilege = `MODIFY` +const PrivilegeModifyCleanRoom Privilege = `MODIFY_CLEAN_ROOM` + const PrivilegeReadFiles Privilege = `READ_FILES` const PrivilegeReadPrivateFiles Privilege = `READ_PRIVATE_FILES` @@ -4528,11 +4534,11 @@ func (f *Privilege) String() string { // Set raw string value and validate it against allowed values func (f *Privilege) Set(v string) error { switch v { - case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `BROWSE`, `CREATE`, `CREATE_CATALOG`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, 
`WRITE_VOLUME`: + case `ACCESS`, `ALL_PRIVILEGES`, `APPLY_TAG`, `BROWSE`, `CREATE`, `CREATE_CATALOG`, `CREATE_CLEAN_ROOM`, `CREATE_CONNECTION`, `CREATE_EXTERNAL_LOCATION`, `CREATE_EXTERNAL_TABLE`, `CREATE_EXTERNAL_VOLUME`, `CREATE_FOREIGN_CATALOG`, `CREATE_FOREIGN_SECURABLE`, `CREATE_FUNCTION`, `CREATE_MANAGED_STORAGE`, `CREATE_MATERIALIZED_VIEW`, `CREATE_MODEL`, `CREATE_PROVIDER`, `CREATE_RECIPIENT`, `CREATE_SCHEMA`, `CREATE_SERVICE_CREDENTIAL`, `CREATE_SHARE`, `CREATE_STORAGE_CREDENTIAL`, `CREATE_TABLE`, `CREATE_VIEW`, `CREATE_VOLUME`, `EXECUTE`, `EXECUTE_CLEAN_ROOM_TASK`, `MANAGE`, `MANAGE_ALLOWLIST`, `MODIFY`, `MODIFY_CLEAN_ROOM`, `READ_FILES`, `READ_PRIVATE_FILES`, `READ_VOLUME`, `REFRESH`, `SELECT`, `SET_SHARE_PERMISSION`, `USAGE`, `USE_CATALOG`, `USE_CONNECTION`, `USE_MARKETPLACE_ASSETS`, `USE_PROVIDER`, `USE_RECIPIENT`, `USE_SCHEMA`, `USE_SHARE`, `WRITE_FILES`, `WRITE_PRIVATE_FILES`, `WRITE_VOLUME`: *f = Privilege(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "BROWSE", "CREATE", "CREATE_CATALOG", "CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) + return fmt.Errorf(`value "%s" is not one of "ACCESS", "ALL_PRIVILEGES", "APPLY_TAG", "BROWSE", "CREATE", "CREATE_CATALOG", "CREATE_CLEAN_ROOM", 
"CREATE_CONNECTION", "CREATE_EXTERNAL_LOCATION", "CREATE_EXTERNAL_TABLE", "CREATE_EXTERNAL_VOLUME", "CREATE_FOREIGN_CATALOG", "CREATE_FOREIGN_SECURABLE", "CREATE_FUNCTION", "CREATE_MANAGED_STORAGE", "CREATE_MATERIALIZED_VIEW", "CREATE_MODEL", "CREATE_PROVIDER", "CREATE_RECIPIENT", "CREATE_SCHEMA", "CREATE_SERVICE_CREDENTIAL", "CREATE_SHARE", "CREATE_STORAGE_CREDENTIAL", "CREATE_TABLE", "CREATE_VIEW", "CREATE_VOLUME", "EXECUTE", "EXECUTE_CLEAN_ROOM_TASK", "MANAGE", "MANAGE_ALLOWLIST", "MODIFY", "MODIFY_CLEAN_ROOM", "READ_FILES", "READ_PRIVATE_FILES", "READ_VOLUME", "REFRESH", "SELECT", "SET_SHARE_PERMISSION", "USAGE", "USE_CATALOG", "USE_CONNECTION", "USE_MARKETPLACE_ASSETS", "USE_PROVIDER", "USE_RECIPIENT", "USE_SCHEMA", "USE_SHARE", "WRITE_FILES", "WRITE_PRIVATE_FILES", "WRITE_VOLUME"`, v) } } @@ -4897,6 +4903,22 @@ type SetArtifactAllowlist struct { ArtifactMatchers []ArtifactMatcher `json:"artifact_matchers"` // The artifact type of the allowlist. ArtifactType ArtifactType `json:"-" url:"-"` + // Time at which this artifact allowlist was set, in epoch milliseconds. + CreatedAt int64 `json:"created_at,omitempty"` + // Username of the user who set the artifact allowlist. + CreatedBy string `json:"created_by,omitempty"` + // Unique identifier of parent metastore. 
+ MetastoreId string `json:"metastore_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *SetArtifactAllowlist) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s SetArtifactAllowlist) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type SetRegisteredModelAliasRequest struct { diff --git a/service/cleanrooms/model.go b/service/cleanrooms/model.go index 7b7a441a0..468cbfa6c 100755 --- a/service/cleanrooms/model.go +++ b/service/cleanrooms/model.go @@ -534,7 +534,7 @@ func (s ComplianceSecurityProfile) MarshalJSON() ([]byte, error) { // Create an asset type CreateCleanRoomAssetRequest struct { // Metadata of the clean room asset - Asset *CleanRoomAsset `json:"asset,omitempty"` + Asset CleanRoomAsset `json:"asset"` // Name of the clean room. CleanRoomName string `json:"-" url:"-"` } @@ -544,7 +544,7 @@ type CreateCleanRoomOutputCatalogRequest struct { // Name of the clean room. CleanRoomName string `json:"-" url:"-"` - OutputCatalog *CleanRoomOutputCatalog `json:"output_catalog,omitempty"` + OutputCatalog CleanRoomOutputCatalog `json:"output_catalog"` } type CreateCleanRoomOutputCatalogResponse struct { @@ -553,7 +553,7 @@ type CreateCleanRoomOutputCatalogResponse struct { // Create a clean room type CreateCleanRoomRequest struct { - CleanRoom *CleanRoom `json:"clean_room,omitempty"` + CleanRoom CleanRoom `json:"clean_room"` } // Delete an asset @@ -716,7 +716,7 @@ func (s ListCleanRoomsResponse) MarshalJSON() ([]byte, error) { // Update an asset type UpdateCleanRoomAssetRequest struct { // Metadata of the clean room asset - Asset *CleanRoomAsset `json:"asset,omitempty"` + Asset CleanRoomAsset `json:"asset"` // The type of the asset. AssetType CleanRoomAssetAssetType `json:"-" url:"-"` // Name of the clean room. 
diff --git a/service/compute/model.go b/service/compute/model.go index 394488a1d..7125b7e2b 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -2747,9 +2747,12 @@ func (s EnforceClusterComplianceResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The environment entity used to preserve serverless environment side panel and -// jobs' environment for non-notebook task. In this minimal environment spec, -// only pip dependencies are supported. +// The environment entity used to preserve serverless environment side panel, +// jobs' environment for non-notebook task, and DLT's environment for classic +// and serverless pipelines. (Note: DLT uses a copied version of the Environment +// proto below, at +// //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In +// this minimal environment spec, only pip dependencies are supported. type Environment struct { // Client version used by the environment The client is the user-facing // environment of the runtime. 
Each client comes with a specific set of @@ -5370,6 +5373,8 @@ const TerminationReasonCodeDbfsComponentUnhealthy TerminationReasonCode = `DBFS_ const TerminationReasonCodeDisasterRecoveryReplication TerminationReasonCode = `DISASTER_RECOVERY_REPLICATION` +const TerminationReasonCodeDnsResolutionError TerminationReasonCode = `DNS_RESOLUTION_ERROR` + const TerminationReasonCodeDockerContainerCreationException TerminationReasonCode = `DOCKER_CONTAINER_CREATION_EXCEPTION` const TerminationReasonCodeDockerImagePullFailure TerminationReasonCode = `DOCKER_IMAGE_PULL_FAILURE` @@ -5406,6 +5411,8 @@ const TerminationReasonCodeExecutorPodUnscheduled TerminationReasonCode = `EXECU const TerminationReasonCodeGcpApiRateQuotaExceeded TerminationReasonCode = `GCP_API_RATE_QUOTA_EXCEEDED` +const TerminationReasonCodeGcpDeniedByOrgPolicy TerminationReasonCode = `GCP_DENIED_BY_ORG_POLICY` + const TerminationReasonCodeGcpForbidden TerminationReasonCode = `GCP_FORBIDDEN` const TerminationReasonCodeGcpIamTimeout TerminationReasonCode = `GCP_IAM_TIMEOUT` @@ -5588,11 +5595,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, 
`AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, 
`GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, 
`UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, 
`CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, 
`METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", 
"AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", 
"GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", 
"STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", 
"CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", 
"INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } diff --git a/service/dashboards/model.go b/service/dashboards/model.go index a3f936531..bfef2675a 100755 --- a/service/dashboards/model.go +++ b/service/dashboards/model.go @@ -79,7 +79,7 @@ type CancelQueryExecutionResponseStatus struct { // Create dashboard type CreateDashboardRequest struct { - Dashboard *Dashboard `json:"dashboard,omitempty"` + Dashboard Dashboard `json:"dashboard"` } // Create dashboard schedule @@ -87,7 +87,7 @@ type CreateScheduleRequest struct { // UUID identifying the 
dashboard to which the schedule belongs. DashboardId string `json:"-" url:"-"` - Schedule *Schedule `json:"schedule,omitempty"` + Schedule Schedule `json:"schedule"` } // Create schedule subscription @@ -97,7 +97,7 @@ type CreateSubscriptionRequest struct { // UUID identifying the schedule to which the subscription belongs. ScheduleId string `json:"-" url:"-"` - Subscription *Subscription `json:"subscription,omitempty"` + Subscription Subscription `json:"subscription"` } type CronSchedule struct { @@ -1340,7 +1340,7 @@ type UnpublishDashboardResponse struct { // Update dashboard type UpdateDashboardRequest struct { - Dashboard *Dashboard `json:"dashboard,omitempty"` + Dashboard Dashboard `json:"dashboard"` // UUID identifying the dashboard. DashboardId string `json:"-" url:"-"` } @@ -1350,7 +1350,7 @@ type UpdateScheduleRequest struct { // UUID identifying the dashboard to which the schedule belongs. DashboardId string `json:"-" url:"-"` - Schedule *Schedule `json:"schedule,omitempty"` + Schedule Schedule `json:"schedule"` // UUID identifying the schedule. ScheduleId string `json:"-" url:"-"` } diff --git a/service/jobs/model.go b/service/jobs/model.go index e3e23e7a8..ec3933531 100755 --- a/service/jobs/model.go +++ b/service/jobs/model.go @@ -651,9 +651,8 @@ type CreateJob struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` - // The performance mode on a serverless job. The performance target - // determines the level of compute performance or cost-efficiency for the - // run. + // The performance mode on a serverless job. This field determines the level + // of compute performance or cost-efficiency for the run. // // * `STANDARD`: Enables cost-efficient execution of serverless workloads. 
* // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times @@ -1574,9 +1573,12 @@ func (s JobEmailNotifications) MarshalJSON() ([]byte, error) { type JobEnvironment struct { // The key of an environment. It has to be unique within a job. EnvironmentKey string `json:"environment_key"` - // The environment entity used to preserve serverless environment side panel - // and jobs' environment for non-notebook task. In this minimal environment - // spec, only pip dependencies are supported. + // The environment entity used to preserve serverless environment side + // panel, jobs' environment for non-notebook task, and DLT's environment for + // classic and serverless pipelines. (Note: DLT uses a copied version of the + // Environment proto below, at + // //spark/pipelines/api/protos/copied/libraries-environments-copy.proto) In + // this minimal environment spec, only pip dependencies are supported. Spec *compute.Environment `json:"spec,omitempty"` } @@ -1812,9 +1814,8 @@ type JobSettings struct { NotificationSettings *JobNotificationSettings `json:"notification_settings,omitempty"` // Job-level parameter definitions Parameters []JobParameterDefinition `json:"parameters,omitempty"` - // The performance mode on a serverless job. The performance target - // determines the level of compute performance or cost-efficiency for the - // run. + // The performance mode on a serverless job. This field determines the level + // of compute performance or cost-efficiency for the run. // // * `STANDARD`: Enables cost-efficient execution of serverless workloads. 
* // `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times diff --git a/service/oauth2/model.go b/service/oauth2/model.go index 3d0fb78de..9782ef127 100755 --- a/service/oauth2/model.go +++ b/service/oauth2/model.go @@ -8,7 +8,7 @@ import ( // Create account federation policy type CreateAccountFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. The identifier must contain // only lowercase alphanumeric characters, numbers, hyphens, and slashes. If // unspecified, the id will be assigned by Databricks. @@ -109,7 +109,7 @@ func (s CreatePublishedAppIntegrationOutput) MarshalJSON() ([]byte, error) { // Create service principal federation policy type CreateServicePrincipalFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. The identifier must contain // only lowercase alphanumeric characters, numbers, hyphens, and slashes. If // unspecified, the id will be assigned by Databricks. @@ -656,7 +656,7 @@ func (s TokenAccessPolicy) MarshalJSON() ([]byte, error) { // Update account federation policy type UpdateAccountFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. PolicyId string `json:"-" url:"-"` // The field mask specifies which fields of the policy to update. To specify @@ -709,7 +709,7 @@ type UpdatePublishedAppIntegrationOutput struct { // Update service principal federation policy type UpdateServicePrincipalFederationPolicyRequest struct { - Policy *FederationPolicy `json:"policy,omitempty"` + Policy FederationPolicy `json:"policy"` // The identifier for the federation policy. PolicyId string `json:"-" url:"-"` // The service principal id for the federation policy. 
diff --git a/service/pkg.go b/service/pkg.go index aaefe6bdf..1d327c7dc 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -14,6 +14,8 @@ // // - [sql.AlertsLegacyAPI]: The alerts API can be used to perform CRUD operations on alerts. // +// - [sql.AlertsV2API]: TODO: Add description. +// // - [apps.AppsAPI]: Apps run directly on a customer’s Databricks instance, integrate with their data, use and extend Databricks services, and enable users to interact through single sign-on. // // - [catalog.ArtifactAllowlistsAPI]: In Databricks Runtime 13.3 and above, you can add libraries and init scripts to the `allowlist` in UC so that users can leverage these artifacts on compute configured with shared access mode. @@ -326,6 +328,7 @@ var ( _ *settings.AibiDashboardEmbeddingApprovedDomainsAPI = nil _ *sql.AlertsAPI = nil _ *sql.AlertsLegacyAPI = nil + _ *sql.AlertsV2API = nil _ *apps.AppsAPI = nil _ *catalog.ArtifactAllowlistsAPI = nil _ *settings.AutomaticClusterUpdateAPI = nil @@ -345,8 +348,8 @@ var ( _ *marketplace.ConsumerListingsAPI = nil _ *marketplace.ConsumerPersonalizationRequestsAPI = nil _ *marketplace.ConsumerProvidersAPI = nil - _ *provisioning.CredentialsAPI = nil _ *catalog.CredentialsAPI = nil + _ *provisioning.CredentialsAPI = nil _ *settings.CredentialsManagerAPI = nil _ *settings.CspEnablementAccountAPI = nil _ *iam.CurrentUserAPI = nil diff --git a/service/serving/model.go b/service/serving/model.go index 502c8698c..f4f890c1c 100755 --- a/service/serving/model.go +++ b/service/serving/model.go @@ -1561,8 +1561,9 @@ type ServedEntityInput struct { // single unit of provisioned concurrency can process one request at a time. // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 - // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size is 0. + // provisioned concurrency). 
Additional custom workload sizes can also be + // used when available in the workspace. If scale-to-zero is enabled, the + // lower bound of the provisioned concurrency for each workload size is 0. WorkloadSize string `json:"workload_size,omitempty"` // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this @@ -1638,8 +1639,9 @@ type ServedEntityOutput struct { // single unit of provisioned concurrency can process one request at a time. // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 - // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size is 0. + // provisioned concurrency). Additional custom workload sizes can also be + // used when available in the workspace. If scale-to-zero is enabled, the + // lower bound of the provisioned concurrency for each workload size is 0. WorkloadSize string `json:"workload_size,omitempty"` // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this @@ -1717,9 +1719,10 @@ type ServedModelInput struct { // single unit of provisioned concurrency can process one request at a time. // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 - // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size is 0. - WorkloadSize ServedModelInputWorkloadSize `json:"workload_size,omitempty"` + // provisioned concurrency). Additional custom workload sizes can also be + // used when available in the workspace. If scale-to-zero is enabled, the + // lower bound of the provisioned concurrency for each workload size is 0. 
+ WorkloadSize string `json:"workload_size,omitempty"` // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this // parameter is "CPU". For deep learning workloads, GPU acceleration is @@ -1740,35 +1743,6 @@ func (s ServedModelInput) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type ServedModelInputWorkloadSize string - -const ServedModelInputWorkloadSizeLarge ServedModelInputWorkloadSize = `Large` - -const ServedModelInputWorkloadSizeMedium ServedModelInputWorkloadSize = `Medium` - -const ServedModelInputWorkloadSizeSmall ServedModelInputWorkloadSize = `Small` - -// String representation for [fmt.Print] -func (f *ServedModelInputWorkloadSize) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *ServedModelInputWorkloadSize) Set(v string) error { - switch v { - case `Large`, `Medium`, `Small`: - *f = ServedModelInputWorkloadSize(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "Large", "Medium", "Small"`, v) - } -} - -// Type always returns ServedModelInputWorkloadSize to satisfy [pflag.Value] interface -func (f *ServedModelInputWorkloadSize) Type() string { - return "ServedModelInputWorkloadSize" -} - // Please keep this in sync with with workload types in // InferenceEndpointEntities.scala type ServedModelInputWorkloadType string @@ -1838,8 +1812,9 @@ type ServedModelOutput struct { // single unit of provisioned concurrency can process one request at a time. // Valid workload sizes are "Small" (4 - 4 provisioned concurrency), // "Medium" (8 - 16 provisioned concurrency), and "Large" (16 - 64 - // provisioned concurrency). If scale-to-zero is enabled, the lower bound of - // the provisioned concurrency for each workload size is 0. + // provisioned concurrency). Additional custom workload sizes can also be + // used when available in the workspace. 
If scale-to-zero is enabled, the + // lower bound of the provisioned concurrency for each workload size is 0. WorkloadSize string `json:"workload_size,omitempty"` // The workload type of the served entity. The workload type selects which // type of compute to use in the endpoint. The default value for this diff --git a/service/settings/api.go b/service/settings/api.go index cb9a7f1ff..808e67ebd 100755 --- a/service/settings/api.go +++ b/service/settings/api.go @@ -1110,6 +1110,20 @@ func (a *IpAccessListsAPI) GetByLabel(ctx context.Context, name string) (*IpAcce type NetworkConnectivityInterface interface { // Create a network connectivity configuration. + // + // Creates a network connectivity configuration (NCC), which provides stable + // Azure service subnets when accessing your Azure Storage accounts. You can + // also use a network connectivity configuration to create Databricks managed + // private endpoints so that Databricks serverless compute resources privately + // access your resources. + // + // **IMPORTANT**: After you create the network connectivity configuration, you + // must assign one or more workspaces to the new network connectivity + // configuration. You can share one network connectivity configuration with + // multiple workspaces from the same Azure region within the same Databricks + // account. See [configure serverless secure connectivity]. + // + // [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) // Create a private endpoint rule. @@ -1166,12 +1180,12 @@ type NetworkConnectivityInterface interface { // Gets a network connectivity configuration. 
GetNetworkConnectivityConfigurationByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*NetworkConnectivityConfiguration, error) - // Get a private endpoint rule. + // Gets a private endpoint rule. // // Gets the private endpoint rule. GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) - // Get a private endpoint rule. + // Gets a private endpoint rule. // // Gets the private endpoint rule. GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) @@ -1208,6 +1222,12 @@ type NetworkConnectivityInterface interface { // // Gets an array of private endpoint rules. ListPrivateEndpointRulesByNetworkConnectivityConfigId(ctx context.Context, networkConnectivityConfigId string) (*ListNccAzurePrivateEndpointRulesResponse, error) + + // Update a private endpoint rule. + // + // Updates a private endpoint rule. Currently only a private endpoint rule to + // customer-managed resources is allowed to be updated. + UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request UpdateNccAzurePrivateEndpointRulePublicRequest) (*NccAzurePrivateEndpointRule, error) } func NewNetworkConnectivity(client *client.DatabricksClient) *NetworkConnectivityAPI { @@ -1219,7 +1239,14 @@ func NewNetworkConnectivity(client *client.DatabricksClient) *NetworkConnectivit } // These APIs provide configurations for the network connectivity of your -// workspaces for serverless compute resources. +// workspaces for serverless compute resources. This API provides stable subnets +// for your workspace so that you can configure your firewalls on your Azure +// Storage accounts to allow access from Databricks. 
You can also use the API to +// provision private endpoints for Databricks to privately connect serverless +// compute resources to your Azure resources using Azure Private Link. See +// [configure serverless secure connectivity]. +// +// [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security type NetworkConnectivityAPI struct { networkConnectivityImpl } @@ -1257,7 +1284,7 @@ func (a *NetworkConnectivityAPI) GetNetworkConnectivityConfigurationByNetworkCon }) } -// Get a private endpoint rule. +// Gets a private endpoint rule. // // Gets the private endpoint rule. func (a *NetworkConnectivityAPI) GetPrivateEndpointRuleByNetworkConnectivityConfigIdAndPrivateEndpointRuleId(ctx context.Context, networkConnectivityConfigId string, privateEndpointRuleId string) (*NccAzurePrivateEndpointRule, error) { diff --git a/service/settings/impl.go b/service/settings/impl.go index 7590bdf0d..d91e4d9be 100755 --- a/service/settings/impl.go +++ b/service/settings/impl.go @@ -695,7 +695,7 @@ func (a *networkConnectivityImpl) CreateNetworkConnectivityConfiguration(ctx con headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &networkConnectivityConfiguration) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.NetworkConnectivityConfig, &networkConnectivityConfiguration) return &networkConnectivityConfiguration, err } @@ -706,7 +706,7 @@ func (a *networkConnectivityImpl) CreatePrivateEndpointRule(ctx context.Context, headers := make(map[string]string) headers["Accept"] = "application/json" headers["Content-Type"] = "application/json" - err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &nccAzurePrivateEndpointRule) + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request.PrivateEndpointRule, 
&nccAzurePrivateEndpointRule) return &nccAzurePrivateEndpointRule, err } @@ -840,6 +840,20 @@ func (a *networkConnectivityImpl) internalListPrivateEndpointRules(ctx context.C return &listNccAzurePrivateEndpointRulesResponse, err } +func (a *networkConnectivityImpl) UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request UpdateNccAzurePrivateEndpointRulePublicRequest) (*NccAzurePrivateEndpointRule, error) { + var nccAzurePrivateEndpointRule NccAzurePrivateEndpointRule + path := fmt.Sprintf("/api/2.0/accounts/%v/network-connectivity-configs/%v/private-endpoint-rules/%v", a.client.ConfiguredAccountID(), request.NetworkConnectivityConfigId, request.PrivateEndpointRuleId) + queryParams := make(map[string]any) + if request.UpdateMask != "" { + queryParams["update_mask"] = request.UpdateMask + } + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request.PrivateEndpointRule, &nccAzurePrivateEndpointRule) + return &nccAzurePrivateEndpointRule, err +} + // unexported type that holds implementations of just NotificationDestinations API methods type notificationDestinationsImpl struct { client *client.DatabricksClient diff --git a/service/settings/interface.go b/service/settings/interface.go index 8042e91ae..cfed01c9e 100755 --- a/service/settings/interface.go +++ b/service/settings/interface.go @@ -562,10 +562,31 @@ type IpAccessListsService interface { } // These APIs provide configurations for the network connectivity of your -// workspaces for serverless compute resources. +// workspaces for serverless compute resources. This API provides stable subnets +// for your workspace so that you can configure your firewalls on your Azure +// Storage accounts to allow access from Databricks. 
You can also use the API to +// provision private endpoints for Databricks to privately connect serverless +// compute resources to your Azure resources using Azure Private Link. See +// [configure serverless secure connectivity]. +// +// [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security type NetworkConnectivityService interface { // Create a network connectivity configuration. + // + // Creates a network connectivity configuration (NCC), which provides stable + // Azure service subnets when accessing your Azure Storage accounts. You can + // also use a network connectivity configuration to create Databricks + // managed private endpoints so that Databricks serverless compute resources + // privately access your resources. + // + // **IMPORTANT**: After you create the network connectivity configuration, + // you must assign one or more workspaces to the new network connectivity + // configuration. You can share one network connectivity configuration with + // multiple workspaces from the same Azure region within the same Databricks + // account. See [configure serverless secure connectivity]. + // + // [configure serverless secure connectivity]: https://learn.microsoft.com/azure/databricks/security/network/serverless-network-security CreateNetworkConnectivityConfiguration(ctx context.Context, request CreateNetworkConnectivityConfigRequest) (*NetworkConnectivityConfiguration, error) // Create a private endpoint rule. @@ -602,7 +623,7 @@ type NetworkConnectivityService interface { // Gets a network connectivity configuration. GetNetworkConnectivityConfiguration(ctx context.Context, request GetNetworkConnectivityConfigurationRequest) (*NetworkConnectivityConfiguration, error) - // Get a private endpoint rule. + // Gets a private endpoint rule. // // Gets the private endpoint rule. 
GetPrivateEndpointRule(ctx context.Context, request GetPrivateEndpointRuleRequest) (*NccAzurePrivateEndpointRule, error) @@ -620,6 +641,12 @@ type NetworkConnectivityService interface { // // Use ListPrivateEndpointRulesAll() to get all NccAzurePrivateEndpointRule instances, which will iterate over every result page. ListPrivateEndpointRules(ctx context.Context, request ListPrivateEndpointRulesRequest) (*ListNccAzurePrivateEndpointRulesResponse, error) + + // Update a private endpoint rule. + // + // Updates a private endpoint rule. Currently only a private endpoint rule + // to customer-managed resources is allowed to be updated. + UpdateNccAzurePrivateEndpointRulePublic(ctx context.Context, request UpdateNccAzurePrivateEndpointRulePublicRequest) (*NccAzurePrivateEndpointRule, error) } // The notification destinations API lets you programmatically manage a diff --git a/service/settings/model.go b/service/settings/model.go index 8775aaa34..a3f5747eb 100755 --- a/service/settings/model.go +++ b/service/settings/model.go @@ -455,11 +455,18 @@ type CreateIpAccessListResponse struct { IpAccessList *IpAccessListInfo `json:"ip_access_list,omitempty"` } +// Create a network connectivity configuration type CreateNetworkConnectivityConfigRequest struct { + // Properties of the new network connectivity configuration. + NetworkConnectivityConfig CreateNetworkConnectivityConfiguration `json:"network_connectivity_config"` +} + +// Properties of the new network connectivity configuration. +type CreateNetworkConnectivityConfiguration struct { // The name of the network connectivity configuration. The name can contain // alphanumeric characters, hyphens, and underscores. The length must be // between 3 and 30 characters. The name must match the regular expression - // `^[0-9a-zA-Z-_]{3,30}$`. + // ^[0-9a-zA-Z-_]{3,30}$ Name string `json:"name"` // The region for the network connectivity configuration. 
Only workspaces in // the same region can be attached to the network connectivity @@ -522,49 +529,42 @@ func (s CreateOboTokenResponse) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type CreatePrivateEndpointRuleRequest struct { +// Properties of the new private endpoint rule. Note that you must approve the +// endpoint in Azure portal after initialization. +type CreatePrivateEndpointRule struct { + // Only used by private endpoints to customer-managed resources. + // + // Domain names of target private link service. When updating this field, + // the full list of target domain_names must be specified. + DomainNames []string `json:"domain_names,omitempty"` + // Only used by private endpoints to Azure first-party services. Enum: blob + // | dfs | sqlServer | mysqlServer + // // The sub-resource type (group ID) of the target resource. Note that to // connect to workspace root storage (root DBFS), you need two endpoints, - // one for `blob` and one for `dfs`. - GroupId CreatePrivateEndpointRuleRequestGroupId `json:"group_id"` - // Your Network Connectvity Configuration ID. - NetworkConnectivityConfigId string `json:"-" url:"-"` + // one for blob and one for dfs. + GroupId string `json:"group_id,omitempty"` // The Azure resource ID of the target resource. ResourceId string `json:"resource_id"` -} - -// The sub-resource type (group ID) of the target resource. Note that to connect -// to workspace root storage (root DBFS), you need two endpoints, one for `blob` -// and one for `dfs`. 
-type CreatePrivateEndpointRuleRequestGroupId string - -const CreatePrivateEndpointRuleRequestGroupIdBlob CreatePrivateEndpointRuleRequestGroupId = `blob` -const CreatePrivateEndpointRuleRequestGroupIdDfs CreatePrivateEndpointRuleRequestGroupId = `dfs` - -const CreatePrivateEndpointRuleRequestGroupIdMysqlServer CreatePrivateEndpointRuleRequestGroupId = `mysqlServer` - -const CreatePrivateEndpointRuleRequestGroupIdSqlServer CreatePrivateEndpointRuleRequestGroupId = `sqlServer` + ForceSendFields []string `json:"-" url:"-"` +} -// String representation for [fmt.Print] -func (f *CreatePrivateEndpointRuleRequestGroupId) String() string { - return string(*f) +func (s *CreatePrivateEndpointRule) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) } -// Set raw string value and validate it against allowed values -func (f *CreatePrivateEndpointRuleRequestGroupId) Set(v string) error { - switch v { - case `blob`, `dfs`, `mysqlServer`, `sqlServer`: - *f = CreatePrivateEndpointRuleRequestGroupId(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v) - } +func (s CreatePrivateEndpointRule) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } -// Type always returns CreatePrivateEndpointRuleRequestGroupId to satisfy [pflag.Value] interface -func (f *CreatePrivateEndpointRuleRequestGroupId) Type() string { - return "CreatePrivateEndpointRuleRequestGroupId" +// Create a private endpoint rule +type CreatePrivateEndpointRuleRequest struct { + // Your Network Connectivity Configuration ID. + NetworkConnectivityConfigId string `json:"-" url:"-"` + // Properties of the new private endpoint rule. Note that you must approve + // the endpoint in Azure portal after initialization. 
+ PrivateEndpointRule CreatePrivateEndpointRule `json:"private_endpoint_rule"` } type CreateTokenRequest struct { @@ -941,7 +941,7 @@ type DeleteIpAccessListRequest struct { // Delete a network connectivity configuration type DeleteNetworkConnectivityConfigurationRequest struct { - // Your Network Connectvity Configuration ID. + // Your Network Connectivity Configuration ID. NetworkConnectivityConfigId string `json:"-" url:"-"` } @@ -1416,6 +1416,35 @@ func (f *EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestina return "EgressNetworkPolicyInternetAccessPolicyStorageDestinationStorageDestinationType" } +// The target resources that are supported by Network Connectivity Config. Note: +// some egress types can support general types that are not defined in +// EgressResourceType. E.g.: Azure private endpoint supports private link +// enabled Azure services. +type EgressResourceType string + +const EgressResourceTypeAzureBlobStorage EgressResourceType = `AZURE_BLOB_STORAGE` + +// String representation for [fmt.Print] +func (f *EgressResourceType) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *EgressResourceType) Set(v string) error { + switch v { + case `AZURE_BLOB_STORAGE`: + *f = EgressResourceType(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AZURE_BLOB_STORAGE"`, v) + } +} + +// Type always returns EgressResourceType to satisfy [pflag.Value] interface +func (f *EgressResourceType) Type() string { + return "EgressResourceType" +} + type EmailConfig struct { // Email addresses to notify. Addresses []string `json:"addresses,omitempty"` @@ -1931,7 +1960,7 @@ type GetIpAccessListsResponse struct { // Get a network connectivity configuration type GetNetworkConnectivityConfigurationRequest struct { - // Your Network Connectvity Configuration ID. + // Your Network Connectivity Configuration ID. 
NetworkConnectivityConfigId string `json:"-" url:"-"` } @@ -1962,7 +1991,7 @@ func (s GetPersonalComputeSettingRequest) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// Get a private endpoint rule +// Gets a private endpoint rule type GetPrivateEndpointRuleRequest struct { // Your Network Connectvity Configuration ID. NetworkConnectivityConfigId string `json:"-" url:"-"` @@ -2057,6 +2086,7 @@ type ListIpAccessListResponse struct { IpAccessLists []IpAccessListInfo `json:"ip_access_lists,omitempty"` } +// The private endpoint rule list was successfully retrieved. type ListNccAzurePrivateEndpointRulesResponse struct { Items []NccAzurePrivateEndpointRule `json:"items,omitempty"` // A token that can be used to get the next page of results. If null, there @@ -2090,6 +2120,7 @@ func (s ListNetworkConnectivityConfigurationsRequest) MarshalJSON() ([]byte, err return marshal.Marshal(s) } +// The network connectivity configuration list was successfully retrieved. type ListNetworkConnectivityConfigurationsResponse struct { Items []NetworkConnectivityConfiguration `json:"items,omitempty"` // A token that can be used to get the next page of results. If null, there @@ -2268,20 +2299,21 @@ type NccAwsStableIpRule struct { CidrBlocks []string `json:"cidr_blocks,omitempty"` } +// Properties of the new private endpoint rule. Note that you must approve the +// endpoint in Azure portal after initialization. type NccAzurePrivateEndpointRule struct { // The current status of this private endpoint. The private endpoint rules - // are effective only if the connection state is `ESTABLISHED`. Remember - // that you must approve new endpoints on your resources in the Azure portal - // before they take effect. - // - // The possible values are: - INIT: (deprecated) The endpoint has been - // created and pending approval. - PENDING: The endpoint has been created - // and pending approval. 
- ESTABLISHED: The endpoint has been approved and - // is ready to use in your serverless compute resources. - REJECTED: - // Connection was rejected by the private link resource owner. - - // DISCONNECTED: Connection was removed by the private link resource owner, - // the private endpoint becomes informative and should be deleted for - // clean-up. + // are effective only if the connection state is ESTABLISHED. Remember that + // you must approve new endpoints on your resources in the Azure portal + // before they take effect. The possible values are: - INIT: (deprecated) + // The endpoint has been created and pending approval. - PENDING: The + // endpoint has been created and pending approval. - ESTABLISHED: The + // endpoint has been approved and is ready to use in your serverless compute + // resources. - REJECTED: Connection was rejected by the private link + // resource owner. - DISCONNECTED: Connection was removed by the private + // link resource owner, the private endpoint becomes informative and should + // be deleted for clean-up. - EXPIRED: If the endpoint was created but not + // approved in 14 days, it will be EXPIRED. ConnectionState NccAzurePrivateEndpointRuleConnectionState `json:"connection_state,omitempty"` // Time in epoch milliseconds when this object was created. CreationTime int64 `json:"creation_time,omitempty"` @@ -2289,12 +2321,20 @@ type NccAzurePrivateEndpointRule struct { Deactivated bool `json:"deactivated,omitempty"` // Time in epoch milliseconds when this object was deactivated. DeactivatedAt int64 `json:"deactivated_at,omitempty"` + // Only used by private endpoints to customer-managed resources. + // + // Domain names of target private link service. When updating this field, + // the full list of target domain_names must be specified. + DomainNames []string `json:"domain_names,omitempty"` // The name of the Azure private endpoint resource. 
EndpointName string `json:"endpoint_name,omitempty"` + // Only used by private endpoints to Azure first-party services. Enum: blob + // | dfs | sqlServer | mysqlServer + // // The sub-resource type (group ID) of the target resource. Note that to // connect to workspace root storage (root DBFS), you need two endpoints, - // one for `blob` and one for `dfs`. - GroupId NccAzurePrivateEndpointRuleGroupId `json:"group_id,omitempty"` + // one for blob and one for dfs. + GroupId string `json:"group_id,omitempty"` // The ID of a network connectivity configuration, which is the parent // resource of this private endpoint rule object. NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` @@ -2316,24 +2356,14 @@ func (s NccAzurePrivateEndpointRule) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The current status of this private endpoint. The private endpoint rules are -// effective only if the connection state is `ESTABLISHED`. Remember that you -// must approve new endpoints on your resources in the Azure portal before they -// take effect. -// -// The possible values are: - INIT: (deprecated) The endpoint has been created -// and pending approval. - PENDING: The endpoint has been created and pending -// approval. - ESTABLISHED: The endpoint has been approved and is ready to use -// in your serverless compute resources. - REJECTED: Connection was rejected by -// the private link resource owner. - DISCONNECTED: Connection was removed by -// the private link resource owner, the private endpoint becomes informative and -// should be deleted for clean-up. 
type NccAzurePrivateEndpointRuleConnectionState string const NccAzurePrivateEndpointRuleConnectionStateDisconnected NccAzurePrivateEndpointRuleConnectionState = `DISCONNECTED` const NccAzurePrivateEndpointRuleConnectionStateEstablished NccAzurePrivateEndpointRuleConnectionState = `ESTABLISHED` +const NccAzurePrivateEndpointRuleConnectionStateExpired NccAzurePrivateEndpointRuleConnectionState = `EXPIRED` + const NccAzurePrivateEndpointRuleConnectionStateInit NccAzurePrivateEndpointRuleConnectionState = `INIT` const NccAzurePrivateEndpointRuleConnectionStatePending NccAzurePrivateEndpointRuleConnectionState = `PENDING` @@ -2348,11 +2378,11 @@ func (f *NccAzurePrivateEndpointRuleConnectionState) String() string { // Set raw string value and validate it against allowed values func (f *NccAzurePrivateEndpointRuleConnectionState) Set(v string) error { switch v { - case `DISCONNECTED`, `ESTABLISHED`, `INIT`, `PENDING`, `REJECTED`: + case `DISCONNECTED`, `ESTABLISHED`, `EXPIRED`, `INIT`, `PENDING`, `REJECTED`: *f = NccAzurePrivateEndpointRuleConnectionState(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "INIT", "PENDING", "REJECTED"`, v) + return fmt.Errorf(`value "%s" is not one of "DISCONNECTED", "ESTABLISHED", "EXPIRED", "INIT", "PENDING", "REJECTED"`, v) } } @@ -2361,40 +2391,6 @@ func (f *NccAzurePrivateEndpointRuleConnectionState) Type() string { return "NccAzurePrivateEndpointRuleConnectionState" } -// The sub-resource type (group ID) of the target resource. Note that to connect -// to workspace root storage (root DBFS), you need two endpoints, one for `blob` -// and one for `dfs`. 
-type NccAzurePrivateEndpointRuleGroupId string - -const NccAzurePrivateEndpointRuleGroupIdBlob NccAzurePrivateEndpointRuleGroupId = `blob` - -const NccAzurePrivateEndpointRuleGroupIdDfs NccAzurePrivateEndpointRuleGroupId = `dfs` - -const NccAzurePrivateEndpointRuleGroupIdMysqlServer NccAzurePrivateEndpointRuleGroupId = `mysqlServer` - -const NccAzurePrivateEndpointRuleGroupIdSqlServer NccAzurePrivateEndpointRuleGroupId = `sqlServer` - -// String representation for [fmt.Print] -func (f *NccAzurePrivateEndpointRuleGroupId) String() string { - return string(*f) -} - -// Set raw string value and validate it against allowed values -func (f *NccAzurePrivateEndpointRuleGroupId) Set(v string) error { - switch v { - case `blob`, `dfs`, `mysqlServer`, `sqlServer`: - *f = NccAzurePrivateEndpointRuleGroupId(v) - return nil - default: - return fmt.Errorf(`value "%s" is not one of "blob", "dfs", "mysqlServer", "sqlServer"`, v) - } -} - -// Type always returns NccAzurePrivateEndpointRuleGroupId to satisfy [pflag.Value] interface -func (f *NccAzurePrivateEndpointRuleGroupId) Type() string { - return "NccAzurePrivateEndpointRuleGroupId" -} - // The stable Azure service endpoints. You can configure the firewall of your // Azure resources to allow traffic from your Databricks serverless compute // resources. @@ -2402,10 +2398,10 @@ type NccAzureServiceEndpointRule struct { // The list of subnets from which Databricks network traffic originates when // accessing your Azure resources. Subnets []string `json:"subnets,omitempty"` - // The Azure region in which this service endpoint rule applies. + // The Azure region in which this service endpoint rule applies. TargetRegion string `json:"target_region,omitempty"` // The Azure services to which this service endpoint rule applies to. 
- TargetServices []string `json:"target_services,omitempty"` + TargetServices []EgressResourceType `json:"target_services,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -2418,8 +2414,6 @@ func (s NccAzureServiceEndpointRule) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -// The network connectivity rules that apply to network traffic from your -// serverless compute resources. type NccEgressConfig struct { // The network connectivity rules that are applied by default without // resource specific configurations. You can find the stable network @@ -2430,9 +2424,7 @@ type NccEgressConfig struct { TargetRules *NccEgressTargetRules `json:"target_rules,omitempty"` } -// The network connectivity rules that are applied by default without resource -// specific configurations. You can find the stable network information of your -// serverless compute resources here. +// Default rules don't have specific targets. type NccEgressDefaultRules struct { // The stable AWS IP CIDR blocks. You can use these to configure the // firewall of your resources to allow traffic from your Databricks @@ -2444,12 +2436,13 @@ type NccEgressDefaultRules struct { AzureServiceEndpointRule *NccAzureServiceEndpointRule `json:"azure_service_endpoint_rule,omitempty"` } -// The network connectivity rules that configured for each destinations. These -// rules override default rules. +// Target rule controls the egress rules that are dedicated to specific +// resources. type NccEgressTargetRules struct { AzurePrivateEndpointRules []NccAzurePrivateEndpointRule `json:"azure_private_endpoint_rules,omitempty"` } +// Properties of the new network connectivity configuration. type NetworkConnectivityConfiguration struct { // The Databricks account ID that hosts the credential. AccountId string `json:"account_id,omitempty"` @@ -2461,7 +2454,7 @@ type NetworkConnectivityConfiguration struct { // The name of the network connectivity configuration. 
The name can contain // alphanumeric characters, hyphens, and underscores. The length must be // between 3 and 30 characters. The name must match the regular expression - // `^[0-9a-zA-Z-_]{3,30}$`. + // ^[0-9a-zA-Z-_]{3,30}$ Name string `json:"name,omitempty"` // Databricks network connectivity configuration ID. NetworkConnectivityConfigId string `json:"network_connectivity_config_id,omitempty"` @@ -3306,6 +3299,24 @@ func (s UpdateIpAccessList) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// Update a private endpoint rule +type UpdateNccAzurePrivateEndpointRulePublicRequest struct { + // Your Network Connectivity Configuration ID. + NetworkConnectivityConfigId string `json:"-" url:"-"` + // Properties of the new private endpoint rule. Note that you must approve + // the endpoint in Azure portal after initialization. + PrivateEndpointRule UpdatePrivateEndpointRule `json:"private_endpoint_rule"` + // Your private endpoint rule ID. + PrivateEndpointRuleId string `json:"-" url:"-"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + UpdateMask string `json:"-" url:"update_mask"` +} + type UpdateNotificationDestinationRequest struct { // The configuration for the notification destination. Must wrap EXACTLY one // of the nested configs. @@ -3347,6 +3358,16 @@ type UpdatePersonalComputeSettingRequest struct { Setting PersonalComputeSetting `json:"setting"` } +// Properties of the new private endpoint rule. Note that you must approve the +// endpoint in Azure portal after initialization. 
+type UpdatePrivateEndpointRule struct { + // Only used by private endpoints to customer-managed resources. + // + // Domain names of target private link service. When updating this field, + // the full list of target domain_names must be specified. + DomainNames []string `json:"domain_names,omitempty"` +} + type UpdateResponse struct { } diff --git a/service/sql/api.go b/service/sql/api.go index 409dc26c3..a41db187c 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -1,6 +1,6 @@ // Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT. -// These APIs allow you to manage Alerts, Alerts Legacy, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Redash Config, Statement Execution, Warehouses, etc. +// These APIs allow you to manage Alerts, Alerts Legacy, Alerts V2, Dashboard Widgets, Dashboards, Data Sources, Dbsql Permissions, Queries, Queries Legacy, Query History, Query Visualizations, Query Visualizations Legacy, Redash Config, Statement Execution, Warehouses, etc. package sql import ( @@ -379,6 +379,161 @@ func (a *AlertsLegacyAPI) GetByName(ctx context.Context, name string) (*LegacyAl return &alternatives[0], nil } +type AlertsV2Interface interface { + + // Create an alert. + // + // Create Alert + CreateAlert(ctx context.Context, request CreateAlertV2Request) (*AlertV2, error) + + // Get an alert. + // + // Gets an alert. + GetAlert(ctx context.Context, request GetAlertV2Request) (*AlertV2, error) + + // Get an alert. + // + // Gets an alert. + GetAlertById(ctx context.Context, id string) (*AlertV2, error) + + // List alerts. + // + // Gets a list of alerts accessible to the user, ordered by creation time. + // + // This method is generated by Databricks SDK Code Generator. + ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] + + // List alerts. 
+ // + // Gets a list of alerts accessible to the user, ordered by creation time. + // + // This method is generated by Databricks SDK Code Generator. + ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) + + // ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. + // + // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // + // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. + // + // This method is generated by Databricks SDK Code Generator. + ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) + + // GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. + // + // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // + // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. + // + // This method is generated by Databricks SDK Code Generator. + GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from list + // views, and can no longer trigger. You can restore a trashed alert through the + // UI. A trashed alert is permanently deleted after 30 days. + TrashAlert(ctx context.Context, request TrashAlertV2Request) error + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from list + // views, and can no longer trigger. You can restore a trashed alert through the + // UI. A trashed alert is permanently deleted after 30 days. 
+ TrashAlertById(ctx context.Context, id string) error + + // Update an alert. + // + // Update alert + UpdateAlert(ctx context.Context, request UpdateAlertV2Request) (*AlertV2, error) +} + +func NewAlertsV2(client *client.DatabricksClient) *AlertsV2API { + return &AlertsV2API{ + alertsV2Impl: alertsV2Impl{ + client: client, + }, + } +} + +// TODO: Add description +type AlertsV2API struct { + alertsV2Impl +} + +// Get an alert. +// +// Gets an alert. +func (a *AlertsV2API) GetAlertById(ctx context.Context, id string) (*AlertV2, error) { + return a.alertsV2Impl.GetAlert(ctx, GetAlertV2Request{ + Id: id, + }) +} + +// ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. +// +// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. +// +// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AlertsV2API) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") + mapping := map[string]string{} + result, err := a.ListAlertsAll(ctx, request) + if err != nil { + return nil, err + } + for _, v := range result { + key := v.DisplayName + _, duplicate := mapping[key] + if duplicate { + return nil, fmt.Errorf("duplicate .DisplayName: %s", key) + } + mapping[key] = v.Id + } + return mapping, nil +} + +// GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. +// +// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. 
+// +// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. +// +// This method is generated by Databricks SDK Code Generator. +func (a *AlertsV2API) GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") + result, err := a.ListAlertsAll(ctx, ListAlertsV2Request{}) + if err != nil { + return nil, err + } + tmp := map[string][]ListAlertsV2ResponseAlert{} + for _, v := range result { + key := v.DisplayName + tmp[key] = append(tmp[key], v) + } + alternatives, ok := tmp[name] + if !ok || len(alternatives) == 0 { + return nil, fmt.Errorf("ListAlertsV2ResponseAlert named '%s' does not exist", name) + } + if len(alternatives) > 1 { + return nil, fmt.Errorf("there are %d instances of ListAlertsV2ResponseAlert named '%s'", len(alternatives), name) + } + return &alternatives[0], nil +} + +// Delete an alert. +// +// Moves an alert to the trash. Trashed alerts immediately disappear from list +// views, and can no longer trigger. You can restore a trashed alert through the +// UI. A trashed alert is permanently deleted after 30 days. +func (a *AlertsV2API) TrashAlertById(ctx context.Context, id string) error { + return a.alertsV2Impl.TrashAlert(ctx, TrashAlertV2Request{ + Id: id, + }) +} + type DashboardWidgetsInterface interface { // Add widget to a dashboard. 
diff --git a/service/sql/impl.go b/service/sql/impl.go index e5f404388..c43cd5e31 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -165,6 +165,98 @@ func (a *alertsLegacyImpl) Update(ctx context.Context, request EditAlert) error return err } +// unexported type that holds implementations of just AlertsV2 API methods +type alertsV2Impl struct { + client *client.DatabricksClient +} + +func (a *alertsV2Impl) CreateAlert(ctx context.Context, request CreateAlertV2Request) (*AlertV2, error) { + var alertV2 AlertV2 + path := "/api/2.0/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPost, path, headers, queryParams, request, &alertV2) + return &alertV2, err +} + +func (a *alertsV2Impl) GetAlert(ctx context.Context, request GetAlertV2Request) (*AlertV2, error) { + var alertV2 AlertV2 + path := fmt.Sprintf("/api/2.0/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &alertV2) + return &alertV2, err +} + +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. 
+func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] { + + getNextPage := func(ctx context.Context, req ListAlertsV2Request) (*ListAlertsV2Response, error) { + ctx = useragent.InContext(ctx, "sdk-feature", "pagination") + return a.internalListAlerts(ctx, req) + } + getItems := func(resp *ListAlertsV2Response) []ListAlertsV2ResponseAlert { + return resp.Results + } + getNextReq := func(resp *ListAlertsV2Response) *ListAlertsV2Request { + if resp.NextPageToken == "" { + return nil + } + request.PageToken = resp.NextPageToken + return &request + } + iterator := listing.NewIterator( + &request, + getNextPage, + getItems, + getNextReq) + return iterator +} + +// List alerts. +// +// Gets a list of alerts accessible to the user, ordered by creation time. +func (a *alertsV2Impl) ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) { + iterator := a.ListAlerts(ctx, request) + return listing.ToSlice[ListAlertsV2ResponseAlert](ctx, iterator) +} + +func (a *alertsV2Impl) internalListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) { + var listAlertsV2Response ListAlertsV2Response + path := "/api/2.0/alerts" + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodGet, path, headers, queryParams, request, &listAlertsV2Response) + return &listAlertsV2Response, err +} + +func (a *alertsV2Impl) TrashAlert(ctx context.Context, request TrashAlertV2Request) error { + var empty Empty + path := fmt.Sprintf("/api/2.0/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + err := a.client.Do(ctx, http.MethodDelete, path, headers, queryParams, request, &empty) + return err +} + +func (a *alertsV2Impl) UpdateAlert(ctx context.Context, request 
UpdateAlertV2Request) (*AlertV2, error) { + var alertV2 AlertV2 + path := fmt.Sprintf("/api/2.0/alerts/%v", request.Id) + queryParams := make(map[string]any) + headers := make(map[string]string) + headers["Accept"] = "application/json" + headers["Content-Type"] = "application/json" + err := a.client.Do(ctx, http.MethodPatch, path, headers, queryParams, request, &alertV2) + return &alertV2, err +} + // unexported type that holds implementations of just DashboardWidgets API methods type dashboardWidgetsImpl struct { client *client.DatabricksClient diff --git a/service/sql/interface.go b/service/sql/interface.go index 52301cdeb..5fcef16a6 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -113,6 +113,39 @@ type AlertsLegacyService interface { Update(ctx context.Context, request EditAlert) error } +// TODO: Add description +type AlertsV2Service interface { + + // Create an alert. + // + // Create Alert + CreateAlert(ctx context.Context, request CreateAlertV2Request) (*AlertV2, error) + + // Get an alert. + // + // Gets an alert. + GetAlert(ctx context.Context, request GetAlertV2Request) (*AlertV2, error) + + // List alerts. + // + // Gets a list of alerts accessible to the user, ordered by creation time. + // + // Use ListAlertsAll() to get all ListAlertsV2ResponseAlert instances, which will iterate over every result page. + ListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) + + // Delete an alert. + // + // Moves an alert to the trash. Trashed alerts immediately disappear from + // list views, and can no longer trigger. You can restore a trashed alert + // through the UI. A trashed alert is permanently deleted after 30 days. + TrashAlert(ctx context.Context, request TrashAlertV2Request) error + + // Update an alert. 
+ // + // Update alert + UpdateAlert(ctx context.Context, request UpdateAlertV2Request) (*AlertV2, error) +} + // This is an evolving API that facilitates the addition and removal of widgets // from existing dashboards within the Databricks Workspace. Data structures may // change over time. diff --git a/service/sql/model.go b/service/sql/model.go index a74f2de93..9eca5ef2b 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -27,6 +27,45 @@ func (s AccessControl) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type Aggregation string + +const AggregationAvg Aggregation = `AVG` + +const AggregationCount Aggregation = `COUNT` + +const AggregationCountDistinct Aggregation = `COUNT_DISTINCT` + +const AggregationMax Aggregation = `MAX` + +const AggregationMedian Aggregation = `MEDIAN` + +const AggregationMin Aggregation = `MIN` + +const AggregationStddev Aggregation = `STDDEV` + +const AggregationSum Aggregation = `SUM` + +// String representation for [fmt.Print] +func (f *Aggregation) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *Aggregation) Set(v string) error { + switch v { + case `AVG`, `COUNT`, `COUNT_DISTINCT`, `MAX`, `MEDIAN`, `MIN`, `STDDEV`, `SUM`: + *f = Aggregation(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "AVG", "COUNT", "COUNT_DISTINCT", "MAX", "MEDIAN", "MIN", "STDDEV", "SUM"`, v) + } +} + +// Type always returns Aggregation to satisfy [pflag.Value] interface +func (f *Aggregation) Type() string { + return "Aggregation" +} + type Alert struct { // Trigger conditions of the alert. 
Condition *AlertCondition `json:"condition,omitempty"` @@ -103,6 +142,40 @@ type AlertConditionThreshold struct { Value *AlertOperandValue `json:"value,omitempty"` } +// UNSPECIFIED - default unspecify value for proto enum, do not use it in the +// code UNKNOWN - alert not yet evaluated TRIGGERED - alert is triggered OK - +// alert is not triggered ERROR - alert evaluation failed +type AlertEvaluationState string + +const AlertEvaluationStateError AlertEvaluationState = `ERROR` + +const AlertEvaluationStateOk AlertEvaluationState = `OK` + +const AlertEvaluationStateTriggered AlertEvaluationState = `TRIGGERED` + +const AlertEvaluationStateUnknown AlertEvaluationState = `UNKNOWN` + +// String representation for [fmt.Print] +func (f *AlertEvaluationState) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *AlertEvaluationState) Set(v string) error { + switch v { + case `ERROR`, `OK`, `TRIGGERED`, `UNKNOWN`: + *f = AlertEvaluationState(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "ERROR", "OK", "TRIGGERED", "UNKNOWN"`, v) + } +} + +// Type always returns AlertEvaluationState to satisfy [pflag.Value] interface +func (f *AlertEvaluationState) Type() string { + return "AlertEvaluationState" +} + type AlertOperandColumn struct { Name string `json:"name,omitempty"` @@ -321,6 +394,156 @@ func (f *AlertState) Type() string { return "AlertState" } +type AlertV2 struct { + // The timestamp indicating when the alert was created. + CreateTime string `json:"create_time,omitempty"` + // Custom description for the alert. support mustache template. + CustomDescription string `json:"custom_description,omitempty"` + // Custom summary for the alert. support mustache template. + CustomSummary string `json:"custom_summary,omitempty"` + // The display name of the alert. 
+ DisplayName string `json:"display_name,omitempty"` + + Evaluation *AlertV2Evaluation `json:"evaluation,omitempty"` + // UUID identifying the alert. + Id string `json:"id,omitempty"` + // Indicates whether the query is trashed. + LifecycleState LifecycleState `json:"lifecycle_state,omitempty"` + // The owner's username. This field is set to "Unavailable" if the user has + // been deleted. + OwnerUserName string `json:"owner_user_name,omitempty"` + // The workspace path of the folder containing the alert. Can only be set on + // create, and cannot be updated. + ParentPath string `json:"parent_path,omitempty"` + // Text of the query to be run. + QueryText string `json:"query_text,omitempty"` + // The run as username. This field is set to "Unavailable" if the user has + // been deleted. + RunAsUserName string `json:"run_as_user_name,omitempty"` + + Schedule *CronSchedule `json:"schedule,omitempty"` + // The timestamp indicating when the alert was updated. + UpdateTime string `json:"update_time,omitempty"` + // ID of the SQL warehouse attached to the alert. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2Evaluation struct { + // Operator used for comparison in alert evaluation. + ComparisonOperator ComparisonOperator `json:"comparison_operator,omitempty"` + // Alert state if result is empty. + EmptyResultState AlertEvaluationState `json:"empty_result_state,omitempty"` + // Timestamp of the last evaluation. + LastEvaluatedAt string `json:"last_evaluated_at,omitempty"` + // User or Notification Destination to notify when alert is triggered. 
+ Notification *AlertV2Notification `json:"notification,omitempty"` + // Source column from result to use to evaluate alert + Source *AlertV2OperandColumn `json:"source,omitempty"` + // Latest state of alert evaluation. + State AlertEvaluationState `json:"state,omitempty"` + // Threshold to user for alert evaluation, can be a column or a value. + Threshold *AlertV2Operand `json:"threshold,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2Evaluation) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2Evaluation) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2Notification struct { + // Whether to notify alert subscribers when alert returns back to normal. + NotifyOnOk bool `json:"notify_on_ok,omitempty"` + // Number of seconds an alert must wait after being triggered to rearm + // itself. After rearming, it can be triggered again. If 0 or not specified, + // the alert will not be triggered again. 
+ RetriggerSeconds int `json:"retrigger_seconds,omitempty"` + + Subscriptions []AlertV2Subscription `json:"subscriptions,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2Notification) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2Notification) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2Operand struct { + Column *AlertV2OperandColumn `json:"column,omitempty"` + + Value *AlertV2OperandValue `json:"value,omitempty"` +} + +type AlertV2OperandColumn struct { + Aggregation Aggregation `json:"aggregation,omitempty"` + + Display string `json:"display,omitempty"` + + Name string `json:"name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2OperandColumn) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2OperandColumn) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2OperandValue struct { + BoolValue bool `json:"bool_value,omitempty"` + + DoubleValue float64 `json:"double_value,omitempty"` + + StringValue string `json:"string_value,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2OperandValue) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2OperandValue) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type AlertV2Subscription struct { + DestinationId string `json:"destination_id,omitempty"` + + UserEmail string `json:"user_email,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *AlertV2Subscription) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s AlertV2Subscription) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Describes metadata for a particular chunk, within a result set; this // structure is used both within a manifest, and when fetching individual chunk // data or links. 
@@ -548,6 +771,45 @@ func (f *ColumnInfoTypeName) Type() string { return "ColumnInfoTypeName" } +type ComparisonOperator string + +const ComparisonOperatorEqual ComparisonOperator = `EQUAL` + +const ComparisonOperatorGreaterThan ComparisonOperator = `GREATER_THAN` + +const ComparisonOperatorGreaterThanOrEqual ComparisonOperator = `GREATER_THAN_OR_EQUAL` + +const ComparisonOperatorIsNotNull ComparisonOperator = `IS_NOT_NULL` + +const ComparisonOperatorIsNull ComparisonOperator = `IS_NULL` + +const ComparisonOperatorLessThan ComparisonOperator = `LESS_THAN` + +const ComparisonOperatorLessThanOrEqual ComparisonOperator = `LESS_THAN_OR_EQUAL` + +const ComparisonOperatorNotEqual ComparisonOperator = `NOT_EQUAL` + +// String representation for [fmt.Print] +func (f *ComparisonOperator) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *ComparisonOperator) Set(v string) error { + switch v { + case `EQUAL`, `GREATER_THAN`, `GREATER_THAN_OR_EQUAL`, `IS_NOT_NULL`, `IS_NULL`, `LESS_THAN`, `LESS_THAN_OR_EQUAL`, `NOT_EQUAL`: + *f = ComparisonOperator(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "EQUAL", "GREATER_THAN", "GREATER_THAN_OR_EQUAL", "IS_NOT_NULL", "IS_NULL", "LESS_THAN", "LESS_THAN_OR_EQUAL", "NOT_EQUAL"`, v) + } +} + +// Type always returns ComparisonOperator to satisfy [pflag.Value] interface +func (f *ComparisonOperator) Type() string { + return "ComparisonOperator" +} + type CreateAlert struct { // Name of the alert. Name string `json:"name"` @@ -575,6 +837,20 @@ func (s CreateAlert) MarshalJSON() ([]byte, error) { type CreateAlertRequest struct { Alert *CreateAlertRequestAlert `json:"alert,omitempty"` + // If true, automatically resolve alert display name conflicts. Otherwise, + // fail the request if the alert's display name conflicts with an existing + // alert's display name. 
+ AutoResolveDisplayName bool `json:"auto_resolve_display_name,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CreateAlertRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateAlertRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type CreateAlertRequestAlert struct { @@ -615,8 +891,27 @@ func (s CreateAlertRequestAlert) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type CreateAlertV2Request struct { + Alert *AlertV2 `json:"alert,omitempty"` +} + type CreateQueryRequest struct { + // If true, automatically resolve query display name conflicts. Otherwise, + // fail the request if the query's display name conflicts with an existing + // query's display name. + AutoResolveDisplayName bool `json:"auto_resolve_display_name,omitempty"` + Query *CreateQueryRequestQuery `json:"query,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CreateQueryRequest) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CreateQueryRequest) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) } type CreateQueryRequestQuery struct { @@ -863,6 +1158,31 @@ func (s CreateWidget) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type CronSchedule struct { + // Indicate whether this schedule is paused or not. + PauseStatus SchedulePauseStatus `json:"pause_status,omitempty"` + // A cron expression using quartz syntax that specifies the schedule for + // this pipeline. Should use the quartz format described here: + // http://www.quartz-scheduler.org/documentation/quartz-2.1.7/tutorials/tutorial-lesson-06.html + QuartzCronSchedule string `json:"quartz_cron_schedule,omitempty"` + // A Java timezone id. The schedule will be resolved using this timezone. + // This will be combined with the quartz_cron_schedule to determine the + // schedule. 
See + // https://docs.databricks.com/sql/language-manual/sql-ref-syntax-aux-conf-mgmt-set-timezone.html + // for details. + TimezoneId string `json:"timezone_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *CronSchedule) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s CronSchedule) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // A JSON representing a dashboard containing widgets of visualizations and text // boxes. type Dashboard struct { @@ -1496,7 +1816,7 @@ type EndpointInfo struct { // Supported values: - Must be unique within an org. - Must be less than 100 // characters. Name string `json:"name,omitempty"` - // current number of active sessions for the warehouse + // Deprecated. current number of active sessions for the warehouse NumActiveSessions int64 `json:"num_active_sessions,omitempty"` // current number of clusters running for the service NumClusters int `json:"num_clusters,omitempty"` @@ -1892,6 +2212,11 @@ type GetAlertRequest struct { Id string `json:"-" url:"-"` } +// Get an alert +type GetAlertV2Request struct { + Id string `json:"-" url:"-"` +} + // Get an alert type GetAlertsLegacyRequest struct { AlertId string `json:"-" url:"-"` @@ -2032,7 +2357,7 @@ type GetWarehouseResponse struct { // Supported values: - Must be unique within an org. - Must be less than 100 // characters. Name string `json:"name,omitempty"` - // current number of active sessions for the warehouse + // Deprecated. 
current number of active sessions for the warehouse NumActiveSessions int64 `json:"num_active_sessions,omitempty"` // current number of clusters running for the service NumClusters int `json:"num_clusters,omitempty"` @@ -2466,6 +2791,80 @@ func (s ListAlertsResponseAlert) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// List alerts +type ListAlertsV2Request struct { + PageSize int `json:"-" url:"page_size,omitempty"` + + PageToken string `json:"-" url:"page_token,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ListAlertsV2Request) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAlertsV2Request) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListAlertsV2Response struct { + NextPageToken string `json:"next_page_token,omitempty"` + + Results []ListAlertsV2ResponseAlert `json:"results,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ListAlertsV2Response) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAlertsV2Response) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + +type ListAlertsV2ResponseAlert struct { + // The timestamp indicating when the alert was created. + CreateTime string `json:"create_time,omitempty"` + // Custom description for the alert. support mustache template. + CustomDescription string `json:"custom_description,omitempty"` + // Custom summary for the alert. support mustache template. + CustomSummary string `json:"custom_summary,omitempty"` + // The display name of the alert. + DisplayName string `json:"display_name,omitempty"` + + Evaluation *AlertV2Evaluation `json:"evaluation,omitempty"` + // UUID identifying the alert. + Id string `json:"id,omitempty"` + // Indicates whether the query is trashed. + LifecycleState LifecycleState `json:"lifecycle_state,omitempty"` + // The owner's username. This field is set to "Unavailable" if the user has + // been deleted. 
+ OwnerUserName string `json:"owner_user_name,omitempty"` + // Text of the query to be run. + QueryText string `json:"query_text,omitempty"` + // The run as username. This field is set to "Unavailable" if the user has + // been deleted. + RunAsUserName string `json:"run_as_user_name,omitempty"` + + Schedule *CronSchedule `json:"schedule,omitempty"` + // The timestamp indicating when the alert was updated. + UpdateTime string `json:"update_time,omitempty"` + // ID of the SQL warehouse attached to the alert. + WarehouseId string `json:"warehouse_id,omitempty"` + + ForceSendFields []string `json:"-" url:"-"` +} + +func (s *ListAlertsV2ResponseAlert) UnmarshalJSON(b []byte) error { + return marshal.Unmarshal(b, s) +} + +func (s ListAlertsV2ResponseAlert) MarshalJSON() ([]byte, error) { + return marshal.Marshal(s) +} + // Get dashboard objects type ListDashboardsRequest struct { // Name of dashboard attribute to order by. @@ -3684,6 +4083,33 @@ func (f *RunAsRole) Type() string { return "RunAsRole" } +type SchedulePauseStatus string + +const SchedulePauseStatusPaused SchedulePauseStatus = `PAUSED` + +const SchedulePauseStatusUnpaused SchedulePauseStatus = `UNPAUSED` + +// String representation for [fmt.Print] +func (f *SchedulePauseStatus) String() string { + return string(*f) +} + +// Set raw string value and validate it against allowed values +func (f *SchedulePauseStatus) Set(v string) error { + switch v { + case `PAUSED`, `UNPAUSED`: + *f = SchedulePauseStatus(v) + return nil + default: + return fmt.Errorf(`value "%s" is not one of "PAUSED", "UNPAUSED"`, v) + } +} + +// Type always returns SchedulePauseStatus to satisfy [pflag.Value] interface +func (f *SchedulePauseStatus) Type() string { + return "SchedulePauseStatus" +} + type ServiceError struct { ErrorCode ServiceErrorCode `json:"error_code,omitempty"` // A brief summary of the error condition. 
@@ -4402,6 +4828,11 @@ type TrashAlertRequest struct { Id string `json:"-" url:"-"` } +// Delete an alert +type TrashAlertV2Request struct { + Id string `json:"-" url:"-"` +} + // Delete a query type TrashQueryRequest struct { Id string `json:"-" url:"-"` @@ -4464,6 +4895,24 @@ func (s UpdateAlertRequestAlert) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +type UpdateAlertV2Request struct { + Alert *AlertV2 `json:"alert,omitempty"` + // UUID identifying the alert. + Id string `json:"-" url:"-"` + // The field mask must be a single string, with multiple fields separated by + // commas (no spaces). The field path is relative to the resource object, + // using a dot (`.`) to navigate sub-fields (e.g., `author.given_name`). + // Specification of elements in sequence or map fields is not allowed, as + // only the entire collection field can be specified. Field names must + // exactly match the resource field names. + // + // A field mask of `*` indicates full replacement. It’s recommended to + // always explicitly list the fields being updated and avoid using `*` + // wildcards, as it can lead to unintended results if the API changes in the + // future. + UpdateMask string `json:"update_mask"` +} + type UpdateQueryRequest struct { Id string `json:"-" url:"-"` diff --git a/workspace_client.go b/workspace_client.go index 13cd2134f..a66fc8983 100755 --- a/workspace_client.go +++ b/workspace_client.go @@ -62,6 +62,9 @@ type WorkspaceClient struct { // [Learn more]: https://docs.databricks.com/en/sql/dbsql-api-latest.html AlertsLegacy sql.AlertsLegacyInterface + // TODO: Add description + AlertsV2 sql.AlertsV2Interface + // Apps run directly on a customer’s Databricks instance, integrate with // their data, use and extend Databricks services, and enable users to // interact through single sign-on. 
@@ -1176,6 +1179,7 @@ func NewWorkspaceClient(c ...*Config) (*WorkspaceClient, error) { AccountAccessControlProxy: iam.NewAccountAccessControlProxy(databricksClient), Alerts: sql.NewAlerts(databricksClient), AlertsLegacy: sql.NewAlertsLegacy(databricksClient), + AlertsV2: sql.NewAlertsV2(databricksClient), Apps: apps.NewApps(databricksClient), ArtifactAllowlists: catalog.NewArtifactAllowlists(databricksClient), Catalogs: catalog.NewCatalogs(databricksClient), From 4e775146dc0cb65a9e5621040ba38e0ffd91143c Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Wed, 30 Apr 2025 11:30:32 +0000 Subject: [PATCH 48/54] [Release] Release v0.66.0 ## Release v0.66.0 ### Bug Fixes * Tolerate trailing slashes in hostnames in `Config`. ### API Changes * Added [w.AlertsV2](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API) workspace-level service. * Added `UpdateNccAzurePrivateEndpointRulePublic` method for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service. * Added `CreatedAt`, `CreatedBy` and `MetastoreId` fields for [catalog.SetArtifactAllowlist](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#SetArtifactAllowlist). * [Breaking] Added `NetworkConnectivityConfig` field for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). * [Breaking] Added `PrivateEndpointRule` field for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). * Added `DomainNames` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule). 
* Added `AutoResolveDisplayName` field for [sql.CreateAlertRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateAlertRequest). * Added `AutoResolveDisplayName` field for [sql.CreateQueryRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateQueryRequest). * Added `CreateCleanRoom`, `ExecuteCleanRoomTask` and `ModifyCleanRoom` enum values for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). * Added `DnsResolutionError` and `GcpDeniedByOrgPolicy` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). * Added `Expired` enum value for [settings.NccAzurePrivateEndpointRuleConnectionState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleConnectionState). * [Breaking] Changed `CreateNetworkConnectivityConfiguration` and `CreatePrivateEndpointRule` methods for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service with new required argument order. * [Breaking] Changed `WorkloadSize` field for [serving.ServedModelInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInput) to type `string`. * [Breaking] Changed `GroupId` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule) to type `string`. * [Breaking] Changed `TargetServices` field for [settings.NccAzureServiceEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzureServiceEndpointRule) to type [settings.EgressResourceTypeList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EgressResourceTypeList). 
* [Breaking] Removed `Name` and `Region` fields for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). * [Breaking] Removed `GroupId` and `ResourceId` fields for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). * [Breaking] Removed `Large`, `Medium` and `Small` enum values for [serving.ServedModelInputWorkloadSize](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInputWorkloadSize). * [Breaking] Removed `Blob`, `Dfs`, `MysqlServer` and `SqlServer` enum values for [settings.NccAzurePrivateEndpointRuleGroupId](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleGroupId). * [Breaking] Field `AppDeployment` of `CreateAppDeploymentRequest` is changed from `*AppDeployment` to `AppDeployment`. * [Breaking] Field `App` of `CreateAppRequest` is changed from `*App` to `App`. * [Breaking] Field `App` of `UpdateAppRequest` is changed from `*App` to `App`. * [Breaking] Field `BudgetPolicy` of `UpdateBudgetPolicyRequest` is changed from `*BudgetPolicy` to `BudgetPolicy`. * [Breaking] Field `OnlineTable` of `CreateOnlineTableRequest` is changed from `*OnlineTable` to `OnlineTable`. * [Breaking] Field `CleanRoomAsset` of `CreateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. * [Breaking] Field `CleanRoom` of `CreateCleanRoomRequest` is changed from `*CleanRoom` to `CleanRoom`. * [Breaking] Field `CleanRoomAsset` of `UpdateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. * [Breaking] Field `Dashboard` of `CreateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. * [Breaking] Field `Schedule` of `CreateScheduleRequest` is changed from `*Schedule` to `Schedule`. 
* [Breaking] Field `Subscription` of `CreateSubscriptionRequest` is changed from `*Subscription` to `Subscription`. * [Breaking] Field `Dashboard` of `UpdateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. --- .release_metadata.json | 2 +- CHANGELOG.md | 39 +++++++++++++++++++++++++++++++++++++++ NEXT_CHANGELOG.md | 34 +--------------------------------- version/version.go | 2 +- 4 files changed, 42 insertions(+), 35 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 48af521ab..55c7bb785 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-04-29 11:36:58+0000" + "timestamp": "2025-04-30 11:30:27+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index b198ed3e6..a54dcd769 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,44 @@ # Version changelog +## Release v0.66.0 + +### Bug Fixes +* Tolerate trailing slashes in hostnames in `Config`. + +### API Changes +* Added [w.AlertsV2](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API) workspace-level service. +* Added `UpdateNccAzurePrivateEndpointRulePublic` method for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service. +* Added `CreatedAt`, `CreatedBy` and `MetastoreId` fields for [catalog.SetArtifactAllowlist](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#SetArtifactAllowlist). +* [Breaking] Added `NetworkConnectivityConfig` field for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). +* [Breaking] Added `PrivateEndpointRule` field for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). 
+* Added `DomainNames` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule). +* Added `AutoResolveDisplayName` field for [sql.CreateAlertRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateAlertRequest). +* Added `AutoResolveDisplayName` field for [sql.CreateQueryRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateQueryRequest). +* Added `CreateCleanRoom`, `ExecuteCleanRoomTask` and `ModifyCleanRoom` enum values for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). +* Added `DnsResolutionError` and `GcpDeniedByOrgPolicy` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* Added `Expired` enum value for [settings.NccAzurePrivateEndpointRuleConnectionState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleConnectionState). +* [Breaking] Changed `CreateNetworkConnectivityConfiguration` and `CreatePrivateEndpointRule` methods for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service with new required argument order. +* [Breaking] Changed `WorkloadSize` field for [serving.ServedModelInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInput) to type `string`. +* [Breaking] Changed `GroupId` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule) to type `string`. 
+* [Breaking] Changed `TargetServices` field for [settings.NccAzureServiceEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzureServiceEndpointRule) to type [settings.EgressResourceTypeList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EgressResourceTypeList). +* [Breaking] Removed `Name` and `Region` fields for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). +* [Breaking] Removed `GroupId` and `ResourceId` fields for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). +* [Breaking] Removed `Large`, `Medium` and `Small` enum values for [serving.ServedModelInputWorkloadSize](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInputWorkloadSize). +* [Breaking] Removed `Blob`, `Dfs`, `MysqlServer` and `SqlServer` enum values for [settings.NccAzurePrivateEndpointRuleGroupId](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleGroupId). +* [Breaking] Field `AppDeployment` of `CreateAppDeploymentRequest` is changed from `*AppDeployment` to `AppDeployment`. +* [Breaking] Field `App` of `CreateAppRequest` is changed from `*App` to `App`. +* [Breaking] Field `App` of `UpdateAppRequest` is changed from `*App` to `App`. +* [Breaking] Field `BudgetPolicy` of `UpdateBudgetPolicyRequest` is changed from `*BudgetPolicy` to `BudgetPolicy`. +* [Breaking] Field `OnlineTable` of `CreateOnlineTableRequest` is changed from `*OnlineTable` to `OnlineTable`. +* [Breaking] Field `CleanRoomAsset` of `CreateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. +* [Breaking] Field `CleanRoom` of `CreateCleanRoomRequest` is changed from `*CleanRoom` to `CleanRoom`. 
+* [Breaking] Field `CleanRoomAsset` of `UpdateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. +* [Breaking] Field `Dashboard` of `CreateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. +* [Breaking] Field `Schedule` of `CreateScheduleRequest` is changed from `*Schedule` to `Schedule`. +* [Breaking] Field `Subscription` of `CreateSubscriptionRequest` is changed from `*Subscription` to `Subscription`. +* [Breaking] Field `Dashboard` of `UpdateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. + + ## Release v0.65.0 ### New Features and Improvements diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 0c210415e..0fd0b1445 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,45 +1,13 @@ # NEXT CHANGELOG -## Release v0.66.0 +## Release v0.67.0 ### New Features and Improvements ### Bug Fixes -* Tolerate trailing slashes in hostnames in `Config`. ### Documentation ### Internal Changes ### API Changes -* Added [w.AlertsV2](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API) workspace-level service. -* Added `UpdateNccAzurePrivateEndpointRulePublic` method for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service. -* Added `CreatedAt`, `CreatedBy` and `MetastoreId` fields for [catalog.SetArtifactAllowlist](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#SetArtifactAllowlist). -* [Breaking] Added `NetworkConnectivityConfig` field for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). -* [Breaking] Added `PrivateEndpointRule` field for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). 
-* Added `DomainNames` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule). -* Added `AutoResolveDisplayName` field for [sql.CreateAlertRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateAlertRequest). -* Added `AutoResolveDisplayName` field for [sql.CreateQueryRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#CreateQueryRequest). -* Added `CreateCleanRoom`, `ExecuteCleanRoomTask` and `ModifyCleanRoom` enum values for [catalog.Privilege](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/catalog#Privilege). -* Added `DnsResolutionError` and `GcpDeniedByOrgPolicy` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). -* Added `Expired` enum value for [settings.NccAzurePrivateEndpointRuleConnectionState](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleConnectionState). -* [Breaking] Changed `CreateNetworkConnectivityConfiguration` and `CreatePrivateEndpointRule` methods for [a.NetworkConnectivity](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NetworkConnectivityAPI) account-level service with new required argument order. -* [Breaking] Changed `WorkloadSize` field for [serving.ServedModelInput](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInput) to type `string`. -* [Breaking] Changed `GroupId` field for [settings.NccAzurePrivateEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRule) to type `string`. 
-* [Breaking] Changed `TargetServices` field for [settings.NccAzureServiceEndpointRule](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzureServiceEndpointRule) to type [settings.EgressResourceTypeList](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#EgressResourceTypeList). -* [Breaking] Removed `Name` and `Region` fields for [settings.CreateNetworkConnectivityConfigRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreateNetworkConnectivityConfigRequest). -* [Breaking] Removed `GroupId` and `ResourceId` fields for [settings.CreatePrivateEndpointRuleRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#CreatePrivateEndpointRuleRequest). -* [Breaking] Removed `Large`, `Medium` and `Small` enum values for [serving.ServedModelInputWorkloadSize](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/serving#ServedModelInputWorkloadSize). -* [Breaking] Removed `Blob`, `Dfs`, `MysqlServer` and `SqlServer` enum values for [settings.NccAzurePrivateEndpointRuleGroupId](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/settings#NccAzurePrivateEndpointRuleGroupId). -* [Breaking] Field `AppDeployment` of `CreateAppDeploymentRequest` is changed from `*AppDeployment` to `AppDeployment`. -* [Breaking] Field `App` of `CreateAppRequest` is changed from `*App` to `App`. -* [Breaking] Field `App` of `UpdateAppRequest` is changed from `*App` to `App`. -* [Breaking] Field `BudgetPolicy` of `UpdateBudgetPolicyRequest` is changed from `*BudgetPolicy` to `BudgetPolicy`. -* [Breaking] Field `OnlineTable` of `CreateOnlineTableRequest` is changed from `*OnlineTable` to `OnlineTable`. -* [Breaking] Field `CleanRoomAsset` of `CreateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. -* [Breaking] Field `CleanRoom` of `CreateCleanRoomRequest` is changed from `*CleanRoom` to `CleanRoom`. 
-* [Breaking] Field `CleanRoomAsset` of `UpdateCleanRoomAssetRequest` is changed from `*CleanRoomAsset` to `CleanRoomAsset`. -* [Breaking] Field `Dashboard` of `CreateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. -* [Breaking] Field `Schedule` of `CreateScheduleRequest` is changed from `*Schedule` to `Schedule`. -* [Breaking] Field `Subscription` of `CreateSubscriptionRequest` is changed from `*Subscription` to `Subscription`. -* [Breaking] Field `Dashboard` of `UpdateDashboardRequest` is changed from `*Dashboard` to `Dashboard`. diff --git a/version/version.go b/version/version.go index 023937540..256c09108 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.65.0" +const Version = "0.66.0" From c7796ceca71d5fc91ff6ead4066fe43055619e89 Mon Sep 17 00:00:00 2001 From: Miles Yucht Date: Fri, 2 May 2025 18:20:35 +0200 Subject: [PATCH 49/54] Bump API Specification to 2 May 2025 (#1214) ## What changes are proposed in this pull request? Bumps the API specification for the Go SDK to the latest API definition as of 2 May 2025. In particular, this fixes the handling of VectorSearch's QueryIndex() method. ## How is this tested? Tested by existing tests. 
--- .codegen/_openapi_sha | 2 +- .gitattributes | 1 + NEXT_CHANGELOG.md | 18 ++ .../service/sql/mock_alerts_v2_interface.go | 158 +++++++++--------- service/compute/model.go | 18 +- service/dashboards/api.go | 4 +- service/files/model.go | 3 +- service/ml/model.go | 6 +- service/pipelines/impl.go | 6 +- service/pipelines/model.go | 59 ++++--- service/pkg.go | 4 +- service/sql/api.go | 42 ++--- service/sql/impl.go | 8 +- service/sql/interface.go | 2 +- service/sql/model.go | 43 +---- service/vectorsearch/model.go | 2 +- 16 files changed, 193 insertions(+), 183 deletions(-) diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index e7f752fb5..3b0b1fdac 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -ce962ccd0a078a5a9d89494fe38d237ce377d5f3 \ No newline at end of file +d4c86c045ee9d0410a41ef07e8ae708673b95fa1 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 3296c8f9f..679971617 100644 --- a/.gitattributes +++ b/.gitattributes @@ -131,6 +131,7 @@ experimental/mocks/service/sharing/mock_recipients_interface.go linguist-generat experimental/mocks/service/sharing/mock_shares_interface.go linguist-generated=true experimental/mocks/service/sql/mock_alerts_interface.go linguist-generated=true experimental/mocks/service/sql/mock_alerts_legacy_interface.go linguist-generated=true +experimental/mocks/service/sql/mock_alerts_v2_interface.go linguist-generated=true experimental/mocks/service/sql/mock_dashboard_widgets_interface.go linguist-generated=true experimental/mocks/service/sql/mock_dashboards_interface.go linguist-generated=true experimental/mocks/service/sql/mock_data_sources_interface.go linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 0fd0b1445..fc2d039d5 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -5,9 +5,27 @@ ### New Features and Improvements ### Bug Fixes +* Fixed the deserialization of responses in VectorSearchAPI's `QueryIndex()` method 
([#1214](https://github.com/databricks/databricks-sdk-go/pull/1214)). ### Documentation ### Internal Changes ### API Changes +* Added `FutureFeatureDataPath` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). +* Added `ExcludeColumns` and `IncludeColumns` fields for [pipelines.TableSpecificConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpecificConfig). +* Added `NetworkCheckControlPlaneFailure`, `NetworkCheckDnsServerFailure`, `NetworkCheckMetadataEndpointFailure`, `NetworkCheckMultipleComponentsFailure`, `NetworkCheckNicFailure`, `NetworkCheckStorageFailure` and `SecretPermissionDenied` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* [Breaking] Changed [vectorsearch.ListValue](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValue) to. +* [Breaking] Changed `PipelineId` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline) to be required. +* Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. +* [Breaking] Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. +* [Breaking] Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required.
+* Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. +* [Breaking] Changed pagination for [AlertsV2API.ListAlerts](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API.ListAlerts). +* [Breaking] Changed waiter for [GenieAPI.CreateMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI.CreateMessage). 
diff --git a/experimental/mocks/service/sql/mock_alerts_v2_interface.go b/experimental/mocks/service/sql/mock_alerts_v2_interface.go index d48e31c99..91100ef51 100644 --- a/experimental/mocks/service/sql/mock_alerts_v2_interface.go +++ b/experimental/mocks/service/sql/mock_alerts_v2_interface.go @@ -24,6 +24,65 @@ func (_m *MockAlertsV2Interface) EXPECT() *MockAlertsV2Interface_Expecter { return &MockAlertsV2Interface_Expecter{mock: &_m.Mock} } +// AlertV2DisplayNameToIdMap provides a mock function with given fields: ctx, request +func (_m *MockAlertsV2Interface) AlertV2DisplayNameToIdMap(ctx context.Context, request sql.ListAlertsV2Request) (map[string]string, error) { + ret := _m.Called(ctx, request) + + if len(ret) == 0 { + panic("no return value specified for AlertV2DisplayNameToIdMap") + } + + var r0 map[string]string + var r1 error + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)); ok { + return rf(ctx, request) + } + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) map[string]string); ok { + r0 = rf(ctx, request) + } else { + if ret.Get(0) != nil { + r0 = ret.Get(0).(map[string]string) + } + } + + if rf, ok := ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { + r1 = rf(ctx, request) + } else { + r1 = ret.Error(1) + } + + return r0, r1 +} + +// MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'AlertV2DisplayNameToIdMap' +type MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call struct { + *mock.Call +} + +// AlertV2DisplayNameToIdMap is a helper method to define mock.On call +// - ctx context.Context +// - request sql.ListAlertsV2Request +func (_e *MockAlertsV2Interface_Expecter) AlertV2DisplayNameToIdMap(ctx interface{}, request interface{}) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + return &MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call{Call: 
_e.mock.On("AlertV2DisplayNameToIdMap", ctx, request)} +} + +func (_c *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + _c.Call.Run(func(args mock.Arguments) { + run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) + }) + return _c +} + +func (_c *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call) Return(_a0 map[string]string, _a1 error) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + _c.Call.Return(_a0, _a1) + return _c +} + +func (_c *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)) *MockAlertsV2Interface_AlertV2DisplayNameToIdMap_Call { + _c.Call.Return(run) + return _c +} + // CreateAlert provides a mock function with given fields: ctx, request func (_m *MockAlertsV2Interface) CreateAlert(ctx context.Context, request sql.CreateAlertV2Request) (*sql.AlertV2, error) { ret := _m.Called(ctx, request) @@ -202,23 +261,23 @@ func (_c *MockAlertsV2Interface_GetAlertById_Call) RunAndReturn(run func(context } // GetByDisplayName provides a mock function with given fields: ctx, name -func (_m *MockAlertsV2Interface) GetByDisplayName(ctx context.Context, name string) (*sql.ListAlertsV2ResponseAlert, error) { +func (_m *MockAlertsV2Interface) GetByDisplayName(ctx context.Context, name string) (*sql.AlertV2, error) { ret := _m.Called(ctx, name) if len(ret) == 0 { panic("no return value specified for GetByDisplayName") } - var r0 *sql.ListAlertsV2ResponseAlert + var r0 *sql.AlertV2 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, string) (*sql.ListAlertsV2ResponseAlert, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, string) (*sql.AlertV2, error)); ok { return rf(ctx, name) } - if rf, ok := ret.Get(0).(func(context.Context, string) *sql.ListAlertsV2ResponseAlert); ok { + if rf, ok := 
ret.Get(0).(func(context.Context, string) *sql.AlertV2); ok { r0 = rf(ctx, name) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(*sql.ListAlertsV2ResponseAlert) + r0 = ret.Get(0).(*sql.AlertV2) } } @@ -250,30 +309,30 @@ func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Run(run func(ctx context. return _c } -func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Return(_a0 *sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_GetByDisplayName_Call { +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) Return(_a0 *sql.AlertV2, _a1 error) *MockAlertsV2Interface_GetByDisplayName_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockAlertsV2Interface_GetByDisplayName_Call) RunAndReturn(run func(context.Context, string) (*sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_GetByDisplayName_Call { +func (_c *MockAlertsV2Interface_GetByDisplayName_Call) RunAndReturn(run func(context.Context, string) (*sql.AlertV2, error)) *MockAlertsV2Interface_GetByDisplayName_Call { _c.Call.Return(run) return _c } // ListAlerts provides a mock function with given fields: ctx, request -func (_m *MockAlertsV2Interface) ListAlerts(ctx context.Context, request sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert] { +func (_m *MockAlertsV2Interface) ListAlerts(ctx context.Context, request sql.ListAlertsV2Request) listing.Iterator[sql.AlertV2] { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for ListAlerts") } - var r0 listing.Iterator[sql.ListAlertsV2ResponseAlert] - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]); ok { + var r0 listing.Iterator[sql.AlertV2] + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.AlertV2]); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).(listing.Iterator[sql.ListAlertsV2ResponseAlert]) + r0 = 
ret.Get(0).(listing.Iterator[sql.AlertV2]) } } @@ -299,34 +358,34 @@ func (_c *MockAlertsV2Interface_ListAlerts_Call) Run(run func(ctx context.Contex return _c } -func (_c *MockAlertsV2Interface_ListAlerts_Call) Return(_a0 listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { +func (_c *MockAlertsV2Interface_ListAlerts_Call) Return(_a0 listing.Iterator[sql.AlertV2]) *MockAlertsV2Interface_ListAlerts_Call { _c.Call.Return(_a0) return _c } -func (_c *MockAlertsV2Interface_ListAlerts_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.ListAlertsV2ResponseAlert]) *MockAlertsV2Interface_ListAlerts_Call { +func (_c *MockAlertsV2Interface_ListAlerts_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) listing.Iterator[sql.AlertV2]) *MockAlertsV2Interface_ListAlerts_Call { _c.Call.Return(run) return _c } // ListAlertsAll provides a mock function with given fields: ctx, request -func (_m *MockAlertsV2Interface) ListAlertsAll(ctx context.Context, request sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error) { +func (_m *MockAlertsV2Interface) ListAlertsAll(ctx context.Context, request sql.ListAlertsV2Request) ([]sql.AlertV2, error) { ret := _m.Called(ctx, request) if len(ret) == 0 { panic("no return value specified for ListAlertsAll") } - var r0 []sql.ListAlertsV2ResponseAlert + var r0 []sql.AlertV2 var r1 error - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)); ok { + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) ([]sql.AlertV2, error)); ok { return rf(ctx, request) } - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) []sql.ListAlertsV2ResponseAlert); ok { + if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) []sql.AlertV2); ok { r0 = rf(ctx, request) } else { if ret.Get(0) != nil { - r0 = ret.Get(0).([]sql.ListAlertsV2ResponseAlert) + r0 
= ret.Get(0).([]sql.AlertV2) } } @@ -358,71 +417,12 @@ func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Run(run func(ctx context.Con return _c } -func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Return(_a0 []sql.ListAlertsV2ResponseAlert, _a1 error) *MockAlertsV2Interface_ListAlertsAll_Call { - _c.Call.Return(_a0, _a1) - return _c -} - -func (_c *MockAlertsV2Interface_ListAlertsAll_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) ([]sql.ListAlertsV2ResponseAlert, error)) *MockAlertsV2Interface_ListAlertsAll_Call { - _c.Call.Return(run) - return _c -} - -// ListAlertsV2ResponseAlertDisplayNameToIdMap provides a mock function with given fields: ctx, request -func (_m *MockAlertsV2Interface) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request sql.ListAlertsV2Request) (map[string]string, error) { - ret := _m.Called(ctx, request) - - if len(ret) == 0 { - panic("no return value specified for ListAlertsV2ResponseAlertDisplayNameToIdMap") - } - - var r0 map[string]string - var r1 error - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)); ok { - return rf(ctx, request) - } - if rf, ok := ret.Get(0).(func(context.Context, sql.ListAlertsV2Request) map[string]string); ok { - r0 = rf(ctx, request) - } else { - if ret.Get(0) != nil { - r0 = ret.Get(0).(map[string]string) - } - } - - if rf, ok := ret.Get(1).(func(context.Context, sql.ListAlertsV2Request) error); ok { - r1 = rf(ctx, request) - } else { - r1 = ret.Error(1) - } - - return r0, r1 -} - -// MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call is a *mock.Call that shadows Run/Return methods with type explicit version for method 'ListAlertsV2ResponseAlertDisplayNameToIdMap' -type MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call struct { - *mock.Call -} - -// ListAlertsV2ResponseAlertDisplayNameToIdMap is a helper method to define mock.On call -// - ctx context.Context -// - request 
sql.ListAlertsV2Request -func (_e *MockAlertsV2Interface_Expecter) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx interface{}, request interface{}) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { - return &MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call{Call: _e.mock.On("ListAlertsV2ResponseAlertDisplayNameToIdMap", ctx, request)} -} - -func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Run(run func(ctx context.Context, request sql.ListAlertsV2Request)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { - _c.Call.Run(func(args mock.Arguments) { - run(args[0].(context.Context), args[1].(sql.ListAlertsV2Request)) - }) - return _c -} - -func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) Return(_a0 map[string]string, _a1 error) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) Return(_a0 []sql.AlertV2, _a1 error) *MockAlertsV2Interface_ListAlertsAll_Call { _c.Call.Return(_a0, _a1) return _c } -func (_c *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) (map[string]string, error)) *MockAlertsV2Interface_ListAlertsV2ResponseAlertDisplayNameToIdMap_Call { +func (_c *MockAlertsV2Interface_ListAlertsAll_Call) RunAndReturn(run func(context.Context, sql.ListAlertsV2Request) ([]sql.AlertV2, error)) *MockAlertsV2Interface_ListAlertsAll_Call { _c.Call.Return(run) return _c } diff --git a/service/compute/model.go b/service/compute/model.go index 7125b7e2b..bf61fd0f1 100755 --- a/service/compute/model.go +++ b/service/compute/model.go @@ -5499,6 +5499,18 @@ const TerminationReasonCodeNephosResourceManagement TerminationReasonCode = `NEP const TerminationReasonCodeNetvisorSetupTimeout TerminationReasonCode = `NETVISOR_SETUP_TIMEOUT` +const 
TerminationReasonCodeNetworkCheckControlPlaneFailure TerminationReasonCode = `NETWORK_CHECK_CONTROL_PLANE_FAILURE` + +const TerminationReasonCodeNetworkCheckDnsServerFailure TerminationReasonCode = `NETWORK_CHECK_DNS_SERVER_FAILURE` + +const TerminationReasonCodeNetworkCheckMetadataEndpointFailure TerminationReasonCode = `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE` + +const TerminationReasonCodeNetworkCheckMultipleComponentsFailure TerminationReasonCode = `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE` + +const TerminationReasonCodeNetworkCheckNicFailure TerminationReasonCode = `NETWORK_CHECK_NIC_FAILURE` + +const TerminationReasonCodeNetworkCheckStorageFailure TerminationReasonCode = `NETWORK_CHECK_STORAGE_FAILURE` + const TerminationReasonCodeNetworkConfigurationFailure TerminationReasonCode = `NETWORK_CONFIGURATION_FAILURE` const TerminationReasonCodeNfsMountFailure TerminationReasonCode = `NFS_MOUNT_FAILURE` @@ -5523,6 +5535,8 @@ const TerminationReasonCodeResourceUsageBlocked TerminationReasonCode = `RESOURC const TerminationReasonCodeSecretCreationFailure TerminationReasonCode = `SECRET_CREATION_FAILURE` +const TerminationReasonCodeSecretPermissionDenied TerminationReasonCode = `SECRET_PERMISSION_DENIED` + const TerminationReasonCodeSecretResolutionError TerminationReasonCode = `SECRET_RESOLUTION_ERROR` const TerminationReasonCodeSecurityDaemonRegistrationException TerminationReasonCode = `SECURITY_DAEMON_REGISTRATION_EXCEPTION` @@ -5595,11 +5609,11 @@ func (f *TerminationReasonCode) String() string { // Set raw string value and validate it against allowed values func (f *TerminationReasonCode) Set(v string) error { switch v { - case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, 
`ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, `BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, 
`DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, `INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, 
`SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, `WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: + case `ABUSE_DETECTED`, `ACCESS_TOKEN_FAILURE`, `ALLOCATION_TIMEOUT`, `ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY`, `ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_READY_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS`, `ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS`, `ATTACH_PROJECT_FAILURE`, `AWS_AUTHORIZATION_FAILURE`, `AWS_INACCESSIBLE_KMS_KEY_FAILURE`, `AWS_INSTANCE_PROFILE_UPDATE_FAILURE`, `AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE`, `AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE`, `AWS_INVALID_KEY_PAIR`, `AWS_INVALID_KMS_KEY_STATE`, `AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE`, `AWS_REQUEST_LIMIT_EXCEEDED`, `AWS_RESOURCE_QUOTA_EXCEEDED`, `AWS_UNSUPPORTED_FAILURE`, `AZURE_BYOK_KEY_PERMISSION_FAILURE`, `AZURE_EPHEMERAL_DISK_FAILURE`, `AZURE_INVALID_DEPLOYMENT_TEMPLATE`, `AZURE_OPERATION_NOT_ALLOWED_EXCEPTION`, `AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE`, `AZURE_QUOTA_EXCEEDED_EXCEPTION`, `AZURE_RESOURCE_MANAGER_THROTTLING`, `AZURE_RESOURCE_PROVIDER_THROTTLING`, `AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE`, `AZURE_VM_EXTENSION_FAILURE`, `AZURE_VNET_CONFIGURATION_FAILURE`, `BOOTSTRAP_TIMEOUT`, `BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION`, 
`BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG`, `BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED`, `BUDGET_POLICY_RESOLUTION_FAILURE`, `CLOUD_ACCOUNT_SETUP_FAILURE`, `CLOUD_OPERATION_CANCELLED`, `CLOUD_PROVIDER_DISK_SETUP_FAILURE`, `CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED`, `CLOUD_PROVIDER_LAUNCH_FAILURE`, `CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT`, `CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG`, `CLOUD_PROVIDER_SHUTDOWN`, `CLUSTER_OPERATION_THROTTLED`, `CLUSTER_OPERATION_TIMEOUT`, `COMMUNICATION_LOST`, `CONTAINER_LAUNCH_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE`, `CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG`, `DATABASE_CONNECTION_FAILURE`, `DATA_ACCESS_CONFIG_CHANGED`, `DBFS_COMPONENT_UNHEALTHY`, `DISASTER_RECOVERY_REPLICATION`, `DNS_RESOLUTION_ERROR`, `DOCKER_CONTAINER_CREATION_EXCEPTION`, `DOCKER_IMAGE_PULL_FAILURE`, `DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION`, `DOCKER_INVALID_OS_EXCEPTION`, `DRIVER_EVICTION`, `DRIVER_LAUNCH_TIMEOUT`, `DRIVER_NODE_UNREACHABLE`, `DRIVER_OUT_OF_DISK`, `DRIVER_OUT_OF_MEMORY`, `DRIVER_POD_CREATION_FAILURE`, `DRIVER_UNEXPECTED_FAILURE`, `DRIVER_UNREACHABLE`, `DRIVER_UNRESPONSIVE`, `DYNAMIC_SPARK_CONF_SIZE_EXCEEDED`, `EOS_SPARK_IMAGE`, `EXECUTION_COMPONENT_UNHEALTHY`, `EXECUTOR_POD_UNSCHEDULED`, `GCP_API_RATE_QUOTA_EXCEEDED`, `GCP_DENIED_BY_ORG_POLICY`, `GCP_FORBIDDEN`, `GCP_IAM_TIMEOUT`, `GCP_INACCESSIBLE_KMS_KEY_FAILURE`, `GCP_INSUFFICIENT_CAPACITY`, `GCP_IP_SPACE_EXHAUSTED`, `GCP_KMS_KEY_PERMISSION_DENIED`, `GCP_NOT_FOUND`, `GCP_QUOTA_EXCEEDED`, `GCP_RESOURCE_QUOTA_EXCEEDED`, `GCP_SERVICE_ACCOUNT_ACCESS_DENIED`, `GCP_SERVICE_ACCOUNT_DELETED`, `GCP_SERVICE_ACCOUNT_NOT_FOUND`, `GCP_SUBNET_NOT_READY`, `GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED`, `GKE_BASED_CLUSTER_TERMINATION`, `GLOBAL_INIT_SCRIPT_FAILURE`, `HIVE_METASTORE_PROVISIONING_FAILURE`, `IMAGE_PULL_PERMISSION_DENIED`, `INACTIVITY`, `INIT_CONTAINER_NOT_FINISHED`, `INIT_SCRIPT_FAILURE`, `INSTANCE_POOL_CLUSTER_FAILURE`, 
`INSTANCE_POOL_MAX_CAPACITY_REACHED`, `INSTANCE_POOL_NOT_FOUND`, `INSTANCE_UNREACHABLE`, `INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG`, `INTERNAL_CAPACITY_FAILURE`, `INTERNAL_ERROR`, `INVALID_ARGUMENT`, `INVALID_AWS_PARAMETER`, `INVALID_INSTANCE_PLACEMENT_PROTOCOL`, `INVALID_SPARK_IMAGE`, `INVALID_WORKER_IMAGE_FAILURE`, `IN_PENALTY_BOX`, `IP_EXHAUSTION_FAILURE`, `JOB_FINISHED`, `K8S_AUTOSCALING_FAILURE`, `K8S_DBR_CLUSTER_LAUNCH_TIMEOUT`, `LAZY_ALLOCATION_TIMEOUT`, `MAINTENANCE_MODE`, `METASTORE_COMPONENT_UNHEALTHY`, `NEPHOS_RESOURCE_MANAGEMENT`, `NETVISOR_SETUP_TIMEOUT`, `NETWORK_CHECK_CONTROL_PLANE_FAILURE`, `NETWORK_CHECK_DNS_SERVER_FAILURE`, `NETWORK_CHECK_METADATA_ENDPOINT_FAILURE`, `NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE`, `NETWORK_CHECK_NIC_FAILURE`, `NETWORK_CHECK_STORAGE_FAILURE`, `NETWORK_CONFIGURATION_FAILURE`, `NFS_MOUNT_FAILURE`, `NO_MATCHED_K8S`, `NO_MATCHED_K8S_TESTING_TAG`, `NPIP_TUNNEL_SETUP_FAILURE`, `NPIP_TUNNEL_TOKEN_FAILURE`, `POD_ASSIGNMENT_FAILURE`, `POD_SCHEDULING_FAILURE`, `REQUEST_REJECTED`, `REQUEST_THROTTLED`, `RESOURCE_USAGE_BLOCKED`, `SECRET_CREATION_FAILURE`, `SECRET_PERMISSION_DENIED`, `SECRET_RESOLUTION_ERROR`, `SECURITY_DAEMON_REGISTRATION_EXCEPTION`, `SELF_BOOTSTRAP_FAILURE`, `SERVERLESS_LONG_RUNNING_TERMINATED`, `SKIPPED_SLOW_NODES`, `SLOW_IMAGE_DOWNLOAD`, `SPARK_ERROR`, `SPARK_IMAGE_DOWNLOAD_FAILURE`, `SPARK_IMAGE_DOWNLOAD_THROTTLED`, `SPARK_IMAGE_NOT_FOUND`, `SPARK_STARTUP_FAILURE`, `SPOT_INSTANCE_TERMINATION`, `SSH_BOOTSTRAP_FAILURE`, `STORAGE_DOWNLOAD_FAILURE`, `STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG`, `STORAGE_DOWNLOAD_FAILURE_SLOW`, `STORAGE_DOWNLOAD_FAILURE_THROTTLED`, `STS_CLIENT_SETUP_FAILURE`, `SUBNET_EXHAUSTED_FAILURE`, `TEMPORARILY_UNAVAILABLE`, `TRIAL_EXPIRED`, `UNEXPECTED_LAUNCH_FAILURE`, `UNEXPECTED_POD_RECREATION`, `UNKNOWN`, `UNSUPPORTED_INSTANCE_TYPE`, `UPDATE_INSTANCE_PROFILE_FAILURE`, `USER_INITIATED_VM_TERMINATION`, `USER_REQUEST`, `WORKER_SETUP_FAILURE`, `WORKSPACE_CANCELLED_ERROR`, 
`WORKSPACE_CONFIGURATION_ERROR`, `WORKSPACE_UPDATE`: *f = TerminationReasonCode(v) return nil default: - return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", "AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", 
"CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", "GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", 
"NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", "SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) + return fmt.Errorf(`value "%s" is not one of "ABUSE_DETECTED", "ACCESS_TOKEN_FAILURE", "ALLOCATION_TIMEOUT", "ALLOCATION_TIMEOUT_NODE_DAEMON_NOT_READY", "ALLOCATION_TIMEOUT_NO_HEALTHY_AND_WARMED_UP_CLUSTERS", "ALLOCATION_TIMEOUT_NO_HEALTHY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_MATCHED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_READY_CLUSTERS", "ALLOCATION_TIMEOUT_NO_UNALLOCATED_CLUSTERS", "ALLOCATION_TIMEOUT_NO_WARMED_UP_CLUSTERS", "ATTACH_PROJECT_FAILURE", "AWS_AUTHORIZATION_FAILURE", "AWS_INACCESSIBLE_KMS_KEY_FAILURE", "AWS_INSTANCE_PROFILE_UPDATE_FAILURE", "AWS_INSUFFICIENT_FREE_ADDRESSES_IN_SUBNET_FAILURE", "AWS_INSUFFICIENT_INSTANCE_CAPACITY_FAILURE", "AWS_INVALID_KEY_PAIR", "AWS_INVALID_KMS_KEY_STATE", "AWS_MAX_SPOT_INSTANCE_COUNT_EXCEEDED_FAILURE", "AWS_REQUEST_LIMIT_EXCEEDED", 
"AWS_RESOURCE_QUOTA_EXCEEDED", "AWS_UNSUPPORTED_FAILURE", "AZURE_BYOK_KEY_PERMISSION_FAILURE", "AZURE_EPHEMERAL_DISK_FAILURE", "AZURE_INVALID_DEPLOYMENT_TEMPLATE", "AZURE_OPERATION_NOT_ALLOWED_EXCEPTION", "AZURE_PACKED_DEPLOYMENT_PARTIAL_FAILURE", "AZURE_QUOTA_EXCEEDED_EXCEPTION", "AZURE_RESOURCE_MANAGER_THROTTLING", "AZURE_RESOURCE_PROVIDER_THROTTLING", "AZURE_UNEXPECTED_DEPLOYMENT_TEMPLATE_FAILURE", "AZURE_VM_EXTENSION_FAILURE", "AZURE_VNET_CONFIGURATION_FAILURE", "BOOTSTRAP_TIMEOUT", "BOOTSTRAP_TIMEOUT_CLOUD_PROVIDER_EXCEPTION", "BOOTSTRAP_TIMEOUT_DUE_TO_MISCONFIG", "BUDGET_POLICY_LIMIT_ENFORCEMENT_ACTIVATED", "BUDGET_POLICY_RESOLUTION_FAILURE", "CLOUD_ACCOUNT_SETUP_FAILURE", "CLOUD_OPERATION_CANCELLED", "CLOUD_PROVIDER_DISK_SETUP_FAILURE", "CLOUD_PROVIDER_INSTANCE_NOT_LAUNCHED", "CLOUD_PROVIDER_LAUNCH_FAILURE", "CLOUD_PROVIDER_LAUNCH_FAILURE_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_RESOURCE_STOCKOUT", "CLOUD_PROVIDER_RESOURCE_STOCKOUT_DUE_TO_MISCONFIG", "CLOUD_PROVIDER_SHUTDOWN", "CLUSTER_OPERATION_THROTTLED", "CLUSTER_OPERATION_TIMEOUT", "COMMUNICATION_LOST", "CONTAINER_LAUNCH_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE", "CONTROL_PLANE_REQUEST_FAILURE_DUE_TO_MISCONFIG", "DATABASE_CONNECTION_FAILURE", "DATA_ACCESS_CONFIG_CHANGED", "DBFS_COMPONENT_UNHEALTHY", "DISASTER_RECOVERY_REPLICATION", "DNS_RESOLUTION_ERROR", "DOCKER_CONTAINER_CREATION_EXCEPTION", "DOCKER_IMAGE_PULL_FAILURE", "DOCKER_IMAGE_TOO_LARGE_FOR_INSTANCE_EXCEPTION", "DOCKER_INVALID_OS_EXCEPTION", "DRIVER_EVICTION", "DRIVER_LAUNCH_TIMEOUT", "DRIVER_NODE_UNREACHABLE", "DRIVER_OUT_OF_DISK", "DRIVER_OUT_OF_MEMORY", "DRIVER_POD_CREATION_FAILURE", "DRIVER_UNEXPECTED_FAILURE", "DRIVER_UNREACHABLE", "DRIVER_UNRESPONSIVE", "DYNAMIC_SPARK_CONF_SIZE_EXCEEDED", "EOS_SPARK_IMAGE", "EXECUTION_COMPONENT_UNHEALTHY", "EXECUTOR_POD_UNSCHEDULED", "GCP_API_RATE_QUOTA_EXCEEDED", "GCP_DENIED_BY_ORG_POLICY", "GCP_FORBIDDEN", "GCP_IAM_TIMEOUT", "GCP_INACCESSIBLE_KMS_KEY_FAILURE", "GCP_INSUFFICIENT_CAPACITY", 
"GCP_IP_SPACE_EXHAUSTED", "GCP_KMS_KEY_PERMISSION_DENIED", "GCP_NOT_FOUND", "GCP_QUOTA_EXCEEDED", "GCP_RESOURCE_QUOTA_EXCEEDED", "GCP_SERVICE_ACCOUNT_ACCESS_DENIED", "GCP_SERVICE_ACCOUNT_DELETED", "GCP_SERVICE_ACCOUNT_NOT_FOUND", "GCP_SUBNET_NOT_READY", "GCP_TRUSTED_IMAGE_PROJECTS_VIOLATED", "GKE_BASED_CLUSTER_TERMINATION", "GLOBAL_INIT_SCRIPT_FAILURE", "HIVE_METASTORE_PROVISIONING_FAILURE", "IMAGE_PULL_PERMISSION_DENIED", "INACTIVITY", "INIT_CONTAINER_NOT_FINISHED", "INIT_SCRIPT_FAILURE", "INSTANCE_POOL_CLUSTER_FAILURE", "INSTANCE_POOL_MAX_CAPACITY_REACHED", "INSTANCE_POOL_NOT_FOUND", "INSTANCE_UNREACHABLE", "INSTANCE_UNREACHABLE_DUE_TO_MISCONFIG", "INTERNAL_CAPACITY_FAILURE", "INTERNAL_ERROR", "INVALID_ARGUMENT", "INVALID_AWS_PARAMETER", "INVALID_INSTANCE_PLACEMENT_PROTOCOL", "INVALID_SPARK_IMAGE", "INVALID_WORKER_IMAGE_FAILURE", "IN_PENALTY_BOX", "IP_EXHAUSTION_FAILURE", "JOB_FINISHED", "K8S_AUTOSCALING_FAILURE", "K8S_DBR_CLUSTER_LAUNCH_TIMEOUT", "LAZY_ALLOCATION_TIMEOUT", "MAINTENANCE_MODE", "METASTORE_COMPONENT_UNHEALTHY", "NEPHOS_RESOURCE_MANAGEMENT", "NETVISOR_SETUP_TIMEOUT", "NETWORK_CHECK_CONTROL_PLANE_FAILURE", "NETWORK_CHECK_DNS_SERVER_FAILURE", "NETWORK_CHECK_METADATA_ENDPOINT_FAILURE", "NETWORK_CHECK_MULTIPLE_COMPONENTS_FAILURE", "NETWORK_CHECK_NIC_FAILURE", "NETWORK_CHECK_STORAGE_FAILURE", "NETWORK_CONFIGURATION_FAILURE", "NFS_MOUNT_FAILURE", "NO_MATCHED_K8S", "NO_MATCHED_K8S_TESTING_TAG", "NPIP_TUNNEL_SETUP_FAILURE", "NPIP_TUNNEL_TOKEN_FAILURE", "POD_ASSIGNMENT_FAILURE", "POD_SCHEDULING_FAILURE", "REQUEST_REJECTED", "REQUEST_THROTTLED", "RESOURCE_USAGE_BLOCKED", "SECRET_CREATION_FAILURE", "SECRET_PERMISSION_DENIED", "SECRET_RESOLUTION_ERROR", "SECURITY_DAEMON_REGISTRATION_EXCEPTION", "SELF_BOOTSTRAP_FAILURE", "SERVERLESS_LONG_RUNNING_TERMINATED", "SKIPPED_SLOW_NODES", "SLOW_IMAGE_DOWNLOAD", "SPARK_ERROR", "SPARK_IMAGE_DOWNLOAD_FAILURE", "SPARK_IMAGE_DOWNLOAD_THROTTLED", "SPARK_IMAGE_NOT_FOUND", "SPARK_STARTUP_FAILURE", "SPOT_INSTANCE_TERMINATION", 
"SSH_BOOTSTRAP_FAILURE", "STORAGE_DOWNLOAD_FAILURE", "STORAGE_DOWNLOAD_FAILURE_DUE_TO_MISCONFIG", "STORAGE_DOWNLOAD_FAILURE_SLOW", "STORAGE_DOWNLOAD_FAILURE_THROTTLED", "STS_CLIENT_SETUP_FAILURE", "SUBNET_EXHAUSTED_FAILURE", "TEMPORARILY_UNAVAILABLE", "TRIAL_EXPIRED", "UNEXPECTED_LAUNCH_FAILURE", "UNEXPECTED_POD_RECREATION", "UNKNOWN", "UNSUPPORTED_INSTANCE_TYPE", "UPDATE_INSTANCE_PROFILE_FAILURE", "USER_INITIATED_VM_TERMINATION", "USER_REQUEST", "WORKER_SETUP_FAILURE", "WORKSPACE_CANCELLED_ERROR", "WORKSPACE_CONFIGURATION_ERROR", "WORKSPACE_UPDATE"`, v) } } diff --git a/service/dashboards/api.go b/service/dashboards/api.go index 5e3771be4..3e34b55ac 100755 --- a/service/dashboards/api.go +++ b/service/dashboards/api.go @@ -249,10 +249,10 @@ func (a *GenieAPI) CreateMessage(ctx context.Context, genieCreateConversationMes return &WaitGetMessageGenieCompleted[GenieMessage]{ Response: genieMessage, ConversationId: genieCreateConversationMessageRequest.ConversationId, - MessageId: genieMessage.Id, + MessageId: genieMessage.MessageId, SpaceId: genieCreateConversationMessageRequest.SpaceId, Poll: func(timeout time.Duration, callback func(*GenieMessage)) (*GenieMessage, error) { - return a.WaitGetMessageGenieCompleted(ctx, genieCreateConversationMessageRequest.ConversationId, genieMessage.Id, genieCreateConversationMessageRequest.SpaceId, timeout, callback) + return a.WaitGetMessageGenieCompleted(ctx, genieCreateConversationMessageRequest.ConversationId, genieMessage.MessageId, genieCreateConversationMessageRequest.SpaceId, timeout, callback) }, timeout: 20 * time.Minute, callback: nil, diff --git a/service/files/model.go b/service/files/model.go index 1205e1281..3c406ade5 100755 --- a/service/files/model.go +++ b/service/files/model.go @@ -372,7 +372,8 @@ type UploadRequest struct { Contents io.ReadCloser `json:"-"` // The absolute path of the file. FilePath string `json:"-" url:"-"` - // If true, an existing file will be overwritten. 
+ // If true or unspecified, an existing file will be overwritten. If false, + // an error will be returned if the path points to an existing file. Overwrite bool `json:"-" url:"overwrite,omitempty"` ForceSendFields []string `json:"-" url:"-"` diff --git a/service/ml/model.go b/service/ml/model.go index b0e0c0491..80d5f6c48 100755 --- a/service/ml/model.go +++ b/service/ml/model.go @@ -410,6 +410,10 @@ type CreateForecastingExperimentRequest struct { // as a multiple of forecast_granularity. This value represents how far // ahead the model should forecast. ForecastHorizon int64 `json:"forecast_horizon"` + // The fully qualified path of a Unity Catalog table, formatted as + // catalog_name.schema_name.table_name, used to store future feature data + // for predictions. + FutureFeatureDataPath string `json:"future_feature_data_path,omitempty"` // The region code(s) to automatically add holiday features. Currently // supports only one region. HolidayRegions []string `json:"holiday_regions,omitempty"` @@ -444,7 +448,7 @@ type CreateForecastingExperimentRequest struct { // The column in the training table used to group the dataset for predicting // individual time series. TimeseriesIdentifierColumns []string `json:"timeseries_identifier_columns,omitempty"` - // The fully qualified name of a Unity Catalog table, formatted as + // The fully qualified path of a Unity Catalog table, formatted as // catalog_name.schema_name.table_name, used as training data for the // forecasting model. TrainDataPath string `json:"train_data_path"` diff --git a/service/pipelines/impl.go b/service/pipelines/impl.go index 588d752fd..53232ecd0 100755 --- a/service/pipelines/impl.go +++ b/service/pipelines/impl.go @@ -110,8 +110,7 @@ func (a *pipelinesImpl) ListPipelineEvents(ctx context.Context, request ListPipe // Retrieves events for a pipeline. 
func (a *pipelinesImpl) ListPipelineEventsAll(ctx context.Context, request ListPipelineEventsRequest) ([]PipelineEvent, error) { iterator := a.ListPipelineEvents(ctx, request) - return listing.ToSliceN[PipelineEvent, int](ctx, iterator, request.MaxResults) - + return listing.ToSlice[PipelineEvent](ctx, iterator) } func (a *pipelinesImpl) internalListPipelineEvents(ctx context.Context, request ListPipelineEventsRequest) (*ListPipelineEventsResponse, error) { @@ -156,8 +155,7 @@ func (a *pipelinesImpl) ListPipelines(ctx context.Context, request ListPipelines // Lists pipelines defined in the Delta Live Tables system. func (a *pipelinesImpl) ListPipelinesAll(ctx context.Context, request ListPipelinesRequest) ([]PipelineStateInfo, error) { iterator := a.ListPipelines(ctx, request) - return listing.ToSliceN[PipelineStateInfo, int](ctx, iterator, request.MaxResults) - + return listing.ToSlice[PipelineStateInfo](ctx, iterator) } func (a *pipelinesImpl) internalListPipelines(ctx context.Context, request ListPipelinesRequest) (*ListPipelinesResponse, error) { diff --git a/service/pipelines/model.go b/service/pipelines/model.go index b1c2795da..f7a6c092d 100755 --- a/service/pipelines/model.go +++ b/service/pipelines/model.go @@ -128,7 +128,7 @@ type DataPlaneId struct { // The instance name of the data plane emitting an event. Instance string `json:"instance,omitempty"` // A sequence number, unique and increasing within the data plane instance. - SeqNo int `json:"seq_no,omitempty"` + SeqNo int64 `json:"seq_no,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -266,7 +266,7 @@ type EditPipeline struct { // Whether Photon is enabled for this pipeline. Photon bool `json:"photon,omitempty"` // Unique identifier for this pipeline. - PipelineId string `json:"pipeline_id,omitempty" url:"-"` + PipelineId string `json:"-" url:"-"` // Restart window of this pipeline. 
RestartWindow *RestartWindow `json:"restart_window,omitempty"` // Write-only setting, available only in Create/Update calls. Specifies the @@ -374,7 +374,7 @@ func (s EventLogSpec) MarshalJSON() ([]byte, error) { } type FileLibrary struct { - // The absolute path of the file. + // The absolute path of the source code. Path string `json:"path,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -513,10 +513,10 @@ type IngestionGatewayPipelineDefinition struct { ConnectionId string `json:"connection_id,omitempty"` // Immutable. The Unity Catalog connection that this gateway pipeline uses // to communicate with the source. - ConnectionName string `json:"connection_name,omitempty"` + ConnectionName string `json:"connection_name"` // Required, Immutable. The name of the catalog for the gateway pipeline's // storage location. - GatewayStorageCatalog string `json:"gateway_storage_catalog,omitempty"` + GatewayStorageCatalog string `json:"gateway_storage_catalog"` // Optional. The Unity Catalog-compatible name for the gateway storage // location. This is the destination to use for the data that is extracted // by the gateway. Delta Live Tables system will automatically create the @@ -524,7 +524,7 @@ type IngestionGatewayPipelineDefinition struct { GatewayStorageName string `json:"gateway_storage_name,omitempty"` // Required, Immutable. The name of the schema for the gateway pipelines's // storage location. - GatewayStorageSchema string `json:"gateway_storage_schema,omitempty"` + GatewayStorageSchema string `json:"gateway_storage_schema"` ForceSendFields []string `json:"-" url:"-"` } @@ -587,7 +587,7 @@ type ListPipelineEventsRequest struct { // with all fields in this request except max_results. An error is returned // if any fields other than max_results are set when this field is set. PageToken string `json:"-" url:"page_token,omitempty"` - + // The pipeline to return events for. 
PipelineId string `json:"-" url:"-"` ForceSendFields []string `json:"-" url:"-"` @@ -749,7 +749,7 @@ func (f *MaturityLevel) Type() string { } type NotebookLibrary struct { - // The absolute path of the notebook. + // The absolute path of the source code. Path string `json:"path,omitempty"` ForceSendFields []string `json:"-" url:"-"` @@ -778,7 +778,7 @@ type Notifications struct { type Origin struct { // The id of a batch. Unique within a flow. - BatchId int `json:"batch_id,omitempty"` + BatchId int64 `json:"batch_id,omitempty"` // The cloud provider, e.g., AWS or Azure. Cloud string `json:"cloud,omitempty"` // The id of the cluster where an execution happens. Unique within a region. @@ -797,7 +797,7 @@ type Origin struct { // Materialization name. MaterializationName string `json:"materialization_name,omitempty"` // The org id of the user. Unique within a cloud. - OrgId int `json:"org_id,omitempty"` + OrgId int64 `json:"org_id,omitempty"` // The id of the pipeline. Globally unique. PipelineId string `json:"pipeline_id,omitempty"` // The name of the pipeline. Not unique. @@ -1025,7 +1025,7 @@ func (f *PipelineClusterAutoscaleMode) Type() string { type PipelineDeployment struct { // The deployment method that manages the pipeline. - Kind DeploymentKind `json:"kind,omitempty"` + Kind DeploymentKind `json:"kind"` // The path to the file containing metadata about the deployment. MetadataFilePath string `json:"metadata_file_path,omitempty"` @@ -1364,14 +1364,14 @@ type PipelineTrigger struct { type ReportSpec struct { // Required. Destination catalog to store table. - DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationCatalog string `json:"destination_catalog"` // Required. Destination schema to store table. - DestinationSchema string `json:"destination_schema,omitempty"` + DestinationSchema string `json:"destination_schema"` // Required. Destination table name. The pipeline fails if a table with that // name already exists. 
DestinationTable string `json:"destination_table,omitempty"` // Required. Report URL in the source system. - SourceUrl string `json:"source_url,omitempty"` + SourceUrl string `json:"source_url"` // Configuration settings to control the ingestion of tables. These settings // override the table_configuration defined in the // IngestionPipelineDefinition object. @@ -1440,16 +1440,16 @@ func (s RunAs) MarshalJSON() ([]byte, error) { type SchemaSpec struct { // Required. Destination catalog to store tables. - DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationCatalog string `json:"destination_catalog"` // Required. Destination schema to store tables in. Tables with the same // name as the source tables are created in this destination schema. The // pipeline fails If a table with the same name already exists. - DestinationSchema string `json:"destination_schema,omitempty"` + DestinationSchema string `json:"destination_schema"` // The source catalog name. Might be optional depending on the type of // source. SourceCatalog string `json:"source_catalog,omitempty"` // Required. Schema name in the source database. - SourceSchema string `json:"source_schema,omitempty"` + SourceSchema string `json:"source_schema"` // Configuration settings to control the ingestion of tables. These settings // are applied to all tables in this schema and override the // table_configuration defined in the IngestionPipelineDefinition object. @@ -1468,7 +1468,7 @@ func (s SchemaSpec) MarshalJSON() ([]byte, error) { type Sequencing struct { // A sequence number, unique and increasing within the control plane. - ControlPlaneSeqNo int `json:"control_plane_seq_no,omitempty"` + ControlPlaneSeqNo int64 `json:"control_plane_seq_no,omitempty"` // the ID assigned by the data plane. DataPlaneId *DataPlaneId `json:"data_plane_id,omitempty"` @@ -1524,6 +1524,7 @@ func (s StackFrame) MarshalJSON() ([]byte, error) { } type StartUpdate struct { + // What triggered this update. 
Cause StartUpdateCause `json:"cause,omitempty"` // If true, this update will reset all tables before running. FullRefresh bool `json:"full_refresh,omitempty"` @@ -1554,6 +1555,7 @@ func (s StartUpdate) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// What triggered this update. type StartUpdateCause string const StartUpdateCauseApiCall StartUpdateCause = `API_CALL` @@ -1613,9 +1615,9 @@ type StopRequest struct { type TableSpec struct { // Required. Destination catalog to store table. - DestinationCatalog string `json:"destination_catalog,omitempty"` + DestinationCatalog string `json:"destination_catalog"` // Required. Destination schema to store table. - DestinationSchema string `json:"destination_schema,omitempty"` + DestinationSchema string `json:"destination_schema"` // Optional. Destination table name. The pipeline fails if a table with that // name already exists. If not set, the source table name is used. DestinationTable string `json:"destination_table,omitempty"` @@ -1625,7 +1627,7 @@ type TableSpec struct { // type of source. SourceSchema string `json:"source_schema,omitempty"` // Required. Table name in the source database. - SourceTable string `json:"source_table,omitempty"` + SourceTable string `json:"source_table"` // Configuration settings to control the ingestion of tables. These settings // override the table_configuration defined in the // IngestionPipelineDefinition object and the SchemaSpec. @@ -1643,6 +1645,18 @@ func (s TableSpec) MarshalJSON() ([]byte, error) { } type TableSpecificConfig struct { + // A list of column names to be excluded for the ingestion. When not + // specified, include_columns fully controls what columns to be ingested. + // When specified, all other columns including future ones will be + // automatically included for ingestion. This field in mutually exclusive + // with `include_columns`. 
+ ExcludeColumns []string `json:"exclude_columns,omitempty"` + // A list of column names to be included for the ingestion. When not + // specified, all columns except ones in exclude_columns will be included. + // Future columns will be automatically included. When specified, all other + // future columns will be automatically excluded from ingestion. This field + // in mutually exclusive with `exclude_columns`. + IncludeColumns []string `json:"include_columns,omitempty"` // The primary key of the table used to apply changes. PrimaryKeys []string `json:"primary_keys,omitempty"` // If true, formula fields defined in the table are included in the @@ -1821,7 +1835,7 @@ func (f *UpdateInfoState) Type() string { type UpdateStateInfo struct { CreationTime string `json:"creation_time,omitempty"` - + // The update state. State UpdateStateInfoState `json:"state,omitempty"` UpdateId string `json:"update_id,omitempty"` @@ -1837,6 +1851,7 @@ func (s UpdateStateInfo) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } +// The update state. type UpdateStateInfoState string const UpdateStateInfoStateCanceled UpdateStateInfoState = `CANCELED` diff --git a/service/pkg.go b/service/pkg.go index 1d327c7dc..751c65297 100644 --- a/service/pkg.go +++ b/service/pkg.go @@ -54,10 +54,10 @@ // // - [marketplace.ConsumerProvidersAPI]: Providers are the entities that publish listings to the Marketplace. // -// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. -// // - [provisioning.CredentialsAPI]: These APIs manage credential configurations for this workspace. // +// - [catalog.CredentialsAPI]: A credential represents an authentication and authorization mechanism for accessing services on your cloud tenant. +// // - [settings.CredentialsManagerAPI]: Credentials manager interacts with with Identity Providers to to perform token exchanges using stored credentials and refresh tokens. 
// // - [settings.CspEnablementAccountAPI]: The compliance security profile settings at the account level control whether to enable it for new workspaces. diff --git a/service/sql/api.go b/service/sql/api.go index a41db187c..eff8a812e 100755 --- a/service/sql/api.go +++ b/service/sql/api.go @@ -401,32 +401,32 @@ type AlertsV2Interface interface { // Gets a list of alerts accessible to the user, ordered by creation time. // // This method is generated by Databricks SDK Code Generator. - ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] + ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[AlertV2] // List alerts. // // Gets a list of alerts accessible to the user, ordered by creation time. // // This method is generated by Databricks SDK Code Generator. - ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) + ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]AlertV2, error) - // ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. + // AlertV2DisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [AlertV2].DisplayName as key and [AlertV2].Id as value. // - // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // Returns an error if there's more than one [AlertV2] with the same .DisplayName. // - // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. + // Note: All [AlertV2] instances are loaded into memory before creating a map. // // This method is generated by Databricks SDK Code Generator. 
- ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) + AlertV2DisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) - // GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. + // GetByDisplayName calls [AlertsV2API.AlertV2DisplayNameToIdMap] and returns a single [AlertV2]. // - // Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. + // Returns an error if there's more than one [AlertV2] with the same .DisplayName. // - // Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. + // Note: All [AlertV2] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. - GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) + GetByDisplayName(ctx context.Context, name string) (*AlertV2, error) // Delete an alert. // @@ -470,14 +470,14 @@ func (a *AlertsV2API) GetAlertById(ctx context.Context, id string) (*AlertV2, er }) } -// ListAlertsV2ResponseAlertDisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [ListAlertsV2ResponseAlert].DisplayName as key and [ListAlertsV2ResponseAlert].Id as value. +// AlertV2DisplayNameToIdMap calls [AlertsV2API.ListAlertsAll] and creates a map of results with [AlertV2].DisplayName as key and [AlertV2].Id as value. // -// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. +// Returns an error if there's more than one [AlertV2] with the same .DisplayName. // -// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before creating a map. +// Note: All [AlertV2] instances are loaded into memory before creating a map. 
// // This method is generated by Databricks SDK Code Generator. -func (a *AlertsV2API) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) { +func (a *AlertsV2API) AlertV2DisplayNameToIdMap(ctx context.Context, request ListAlertsV2Request) (map[string]string, error) { ctx = useragent.InContext(ctx, "sdk-feature", "name-to-id") mapping := map[string]string{} result, err := a.ListAlertsAll(ctx, request) @@ -495,30 +495,30 @@ func (a *AlertsV2API) ListAlertsV2ResponseAlertDisplayNameToIdMap(ctx context.Co return mapping, nil } -// GetByDisplayName calls [AlertsV2API.ListAlertsV2ResponseAlertDisplayNameToIdMap] and returns a single [ListAlertsV2ResponseAlert]. +// GetByDisplayName calls [AlertsV2API.AlertV2DisplayNameToIdMap] and returns a single [AlertV2]. // -// Returns an error if there's more than one [ListAlertsV2ResponseAlert] with the same .DisplayName. +// Returns an error if there's more than one [AlertV2] with the same .DisplayName. // -// Note: All [ListAlertsV2ResponseAlert] instances are loaded into memory before returning matching by name. +// Note: All [AlertV2] instances are loaded into memory before returning matching by name. // // This method is generated by Databricks SDK Code Generator. 
-func (a *AlertsV2API) GetByDisplayName(ctx context.Context, name string) (*ListAlertsV2ResponseAlert, error) { +func (a *AlertsV2API) GetByDisplayName(ctx context.Context, name string) (*AlertV2, error) { ctx = useragent.InContext(ctx, "sdk-feature", "get-by-name") result, err := a.ListAlertsAll(ctx, ListAlertsV2Request{}) if err != nil { return nil, err } - tmp := map[string][]ListAlertsV2ResponseAlert{} + tmp := map[string][]AlertV2{} for _, v := range result { key := v.DisplayName tmp[key] = append(tmp[key], v) } alternatives, ok := tmp[name] if !ok || len(alternatives) == 0 { - return nil, fmt.Errorf("ListAlertsV2ResponseAlert named '%s' does not exist", name) + return nil, fmt.Errorf("AlertV2 named '%s' does not exist", name) } if len(alternatives) > 1 { - return nil, fmt.Errorf("there are %d instances of ListAlertsV2ResponseAlert named '%s'", len(alternatives), name) + return nil, fmt.Errorf("there are %d instances of AlertV2 named '%s'", len(alternatives), name) } return &alternatives[0], nil } diff --git a/service/sql/impl.go b/service/sql/impl.go index c43cd5e31..c10f98d3d 100755 --- a/service/sql/impl.go +++ b/service/sql/impl.go @@ -194,13 +194,13 @@ func (a *alertsV2Impl) GetAlert(ctx context.Context, request GetAlertV2Request) // List alerts. // // Gets a list of alerts accessible to the user, ordered by creation time. 
-func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[ListAlertsV2ResponseAlert] { +func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Request) listing.Iterator[AlertV2] { getNextPage := func(ctx context.Context, req ListAlertsV2Request) (*ListAlertsV2Response, error) { ctx = useragent.InContext(ctx, "sdk-feature", "pagination") return a.internalListAlerts(ctx, req) } - getItems := func(resp *ListAlertsV2Response) []ListAlertsV2ResponseAlert { + getItems := func(resp *ListAlertsV2Response) []AlertV2 { return resp.Results } getNextReq := func(resp *ListAlertsV2Response) *ListAlertsV2Request { @@ -221,9 +221,9 @@ func (a *alertsV2Impl) ListAlerts(ctx context.Context, request ListAlertsV2Reque // List alerts. // // Gets a list of alerts accessible to the user, ordered by creation time. -func (a *alertsV2Impl) ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]ListAlertsV2ResponseAlert, error) { +func (a *alertsV2Impl) ListAlertsAll(ctx context.Context, request ListAlertsV2Request) ([]AlertV2, error) { iterator := a.ListAlerts(ctx, request) - return listing.ToSlice[ListAlertsV2ResponseAlert](ctx, iterator) + return listing.ToSlice[AlertV2](ctx, iterator) } func (a *alertsV2Impl) internalListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) { diff --git a/service/sql/interface.go b/service/sql/interface.go index 5fcef16a6..631bccb98 100755 --- a/service/sql/interface.go +++ b/service/sql/interface.go @@ -130,7 +130,7 @@ type AlertsV2Service interface { // // Gets a list of alerts accessible to the user, ordered by creation time. // - // Use ListAlertsAll() to get all ListAlertsV2ResponseAlert instances, which will iterate over every result page. + // Use ListAlertsAll() to get all AlertV2 instances, which will iterate over every result page. 
ListAlerts(ctx context.Context, request ListAlertsV2Request) (*ListAlertsV2Response, error) // Delete an alert. diff --git a/service/sql/model.go b/service/sql/model.go index 9eca5ef2b..31f9f23fd 100755 --- a/service/sql/model.go +++ b/service/sql/model.go @@ -2811,7 +2811,7 @@ func (s ListAlertsV2Request) MarshalJSON() ([]byte, error) { type ListAlertsV2Response struct { NextPageToken string `json:"next_page_token,omitempty"` - Results []ListAlertsV2ResponseAlert `json:"results,omitempty"` + Results []AlertV2 `json:"results,omitempty"` ForceSendFields []string `json:"-" url:"-"` } @@ -2824,47 +2824,6 @@ func (s ListAlertsV2Response) MarshalJSON() ([]byte, error) { return marshal.Marshal(s) } -type ListAlertsV2ResponseAlert struct { - // The timestamp indicating when the alert was created. - CreateTime string `json:"create_time,omitempty"` - // Custom description for the alert. support mustache template. - CustomDescription string `json:"custom_description,omitempty"` - // Custom summary for the alert. support mustache template. - CustomSummary string `json:"custom_summary,omitempty"` - // The display name of the alert. - DisplayName string `json:"display_name,omitempty"` - - Evaluation *AlertV2Evaluation `json:"evaluation,omitempty"` - // UUID identifying the alert. - Id string `json:"id,omitempty"` - // Indicates whether the query is trashed. - LifecycleState LifecycleState `json:"lifecycle_state,omitempty"` - // The owner's username. This field is set to "Unavailable" if the user has - // been deleted. - OwnerUserName string `json:"owner_user_name,omitempty"` - // Text of the query to be run. - QueryText string `json:"query_text,omitempty"` - // The run as username. This field is set to "Unavailable" if the user has - // been deleted. - RunAsUserName string `json:"run_as_user_name,omitempty"` - - Schedule *CronSchedule `json:"schedule,omitempty"` - // The timestamp indicating when the alert was updated. 
- UpdateTime string `json:"update_time,omitempty"` - // ID of the SQL warehouse attached to the alert. - WarehouseId string `json:"warehouse_id,omitempty"` - - ForceSendFields []string `json:"-" url:"-"` -} - -func (s *ListAlertsV2ResponseAlert) UnmarshalJSON(b []byte) error { - return marshal.Unmarshal(b, s) -} - -func (s ListAlertsV2ResponseAlert) MarshalJSON() ([]byte, error) { - return marshal.Marshal(s) -} - // Get dashboard objects type ListDashboardsRequest struct { // Name of dashboard attribute to order by. diff --git a/service/vectorsearch/model.go b/service/vectorsearch/model.go index d9727496d..80ca3e57c 100755 --- a/service/vectorsearch/model.go +++ b/service/vectorsearch/model.go @@ -678,7 +678,7 @@ func (s QueryVectorIndexResponse) MarshalJSON() ([]byte, error) { // Data returned in the query result. type ResultData struct { // Data rows returned in the query. - DataArray []ListValue `json:"data_array,omitempty"` + DataArray [][]string `json:"data_array,omitempty"` // Number of rows in the result set. RowCount int `json:"row_count,omitempty"` From 13af0878b6d34ffa07009b02050b124d3217f4d1 Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Fri, 2 May 2025 16:25:23 +0000 Subject: [PATCH 50/54] [Release] Release v0.67.0 ## Release v0.67.0 ### Bug Fixes * Fixed the deserialization of responses in VectorSearchAPI's `QueryIndex()` method ([#1214](https://github.com/databricks/databricks-sdk-py/pull/1214)). ### API Changes * Added `FutureFeatureDataPath` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). * Added `ExcludeColumns` and `IncludeColumns` fields for [pipelines.TableSpecificConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpecificConfig). 
* Added `NetworkCheckControlPlaneFailure`, `NetworkCheckDnsServerFailure`, `NetworkCheckMetadataEndpointFailure`, `NetworkCheckMultipleComponentsFailure`, `NetworkCheckNicFailure`, `NetworkCheckStorageFailure` and `SecretPermissionDenied` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). * [Breaking] Changed [vectorsearch.ListValue](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValue) to `[]string`. * [Breaking] Changed `PipelineId` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline) to be required. * Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. * [Breaking] Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. * [Breaking] Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. * Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. * [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required.
* Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. * [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. * Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. * Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. * [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. * [Breaking] Changed pagination for [AlertsV2API.ListAlerts](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API.ListAlerts). * [Breaking] Changed waiter for [GenieAPI.CreateMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI.CreateMessage). 
--- .release_metadata.json | 2 +- CHANGELOG.md | 25 +++++++++++++++++++++++++ NEXT_CHANGELOG.md | 20 +------------------- version/version.go | 2 +- 4 files changed, 28 insertions(+), 21 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 55c7bb785..7dc90fcb0 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-04-30 11:30:27+0000" + "timestamp": "2025-05-02 16:25:19+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index a54dcd769..849ebab53 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,30 @@ # Version changelog +## Release v0.67.0 + +### Bug Fixes +* Fixed the deserialization of responses in VectorSearchAPI's `QueryIndex()` method ([#1214](https://github.com/databricks/databricks-sdk-py/pull/1214)). + +### API Changes +* Added `FutureFeatureDataPath` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). +* Added `ExcludeColumns` and `IncludeColumns` fields for [pipelines.TableSpecificConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpecificConfig). +* Added `NetworkCheckControlPlaneFailure`, `NetworkCheckDnsServerFailure`, `NetworkCheckMetadataEndpointFailure`, `NetworkCheckMultipleComponentsFailure`, `NetworkCheckNicFailure`, `NetworkCheckStorageFailure` and `SecretPermissionDenied` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). +* [Breaking] Changed [vectorsearch.ListValue](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValue) to. +* [Breaking] Changed `PipelineId` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline) to be required. 
+* Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. +* [Breaking] Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. +* [Breaking] Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. +* Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. +* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. +* Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. 
+* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. +* [Breaking] Changed pagination for [AlertsV2API.ListAlerts](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API.ListAlerts). +* [Breaking] Changed waiter for [GenieAPI.CreateMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI.CreateMessage). + + ## Release v0.66.0 ### Bug Fixes diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index fc2d039d5..b4585a98d 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,31 +1,13 @@ # NEXT CHANGELOG -## Release v0.67.0 +## Release v0.68.0 ### New Features and Improvements ### Bug Fixes -* Fixed the deserialization of responses in VectorSearchAPI's `QueryIndex()` method ([#1214](https://github.com/databricks/databricks-sdk-py/pull/1214)). ### Documentation ### Internal Changes ### API Changes -* Added `FutureFeatureDataPath` field for [ml.CreateForecastingExperimentRequest](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/ml#CreateForecastingExperimentRequest). -* Added `ExcludeColumns` and `IncludeColumns` fields for [pipelines.TableSpecificConfig](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpecificConfig). -* Added `NetworkCheckControlPlaneFailure`, `NetworkCheckDnsServerFailure`, `NetworkCheckMetadataEndpointFailure`, `NetworkCheckMultipleComponentsFailure`, `NetworkCheckNicFailure`, `NetworkCheckStorageFailure` and `SecretPermissionDenied` enum values for [compute.TerminationReasonCode](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/compute#TerminationReasonCode). -* [Breaking] Changed [vectorsearch.ListValue](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/vectorsearch#ListValue) to. 
-* [Breaking] Changed `PipelineId` field for [pipelines.EditPipeline](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#EditPipeline) to be required. -* Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. -* [Breaking] Changed `ConnectionName`, `GatewayStorageCatalog` and `GatewayStorageSchema` fields for [pipelines.IngestionGatewayPipelineDefinition](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#IngestionGatewayPipelineDefinition) to be required. -* [Breaking] Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. -* Changed `Kind` field for [pipelines.PipelineDeployment](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#PipelineDeployment) to be required. -* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. -* Changed `DestinationCatalog`, `DestinationSchema` and `SourceUrl` fields for [pipelines.ReportSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#ReportSpec) to be required. -* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. -* Changed `DestinationCatalog`, `DestinationSchema` and `SourceSchema` fields for [pipelines.SchemaSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#SchemaSpec) to be required. 
-* Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. -* [Breaking] Changed `DestinationCatalog`, `DestinationSchema` and `SourceTable` fields for [pipelines.TableSpec](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/pipelines#TableSpec) to be required. -* [Breaking] Changed pagination for [AlertsV2API.ListAlerts](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/sql#AlertsV2API.ListAlerts). -* [Breaking] Changed waiter for [GenieAPI.CreateMessage](https://pkg.go.dev/github.com/databricks/databricks-sdk-go/service/dashboards#GenieAPI.CreateMessage). diff --git a/version/version.go b/version/version.go index 256c09108..724a984b6 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.66.0" +const Version = "0.67.0" From 8a6d0df33e614ba3165a76b1dc50ce9366828706 Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Tue, 6 May 2025 11:52:36 +0200 Subject: [PATCH 51/54] Add support for OIDC ID token authentication using an environment variable. (#1215) ## What changes are proposed in this pull request? This PR adds a new way to authenticate by reading OIDC ID tokens from environment variables. By default, the new credential strategy attempts to read the token from `DATABRICKS_OIDC_TOKEN`. This default value can be overwritten by setting `DATABRICKS_OIDC_TOKEN_ENV`. The new authentication mode will be tested before Github OIDC. The rationale is that we consider setting up `DATABRICKS_OIDC_TOKEN` as a stronger signal of intent than enabling OIDC for the whole Github Action. This PR also moves `IDTokenSource` in its own `oidc` package within the `experimental/auth` package to clarify that these interfaces are still being validated. ## How is this tested? 
Complete test coverage of the new source. --- NEXT_CHANGELOG.md | 3 + config/auth_databricks_oidc.go | 3 +- config/auth_databricks_oidc_test.go | 69 ++++++-------- config/auth_default.go | 16 +++- config/config.go | 3 + config/experimental/auth/oidc/oidc.go | 50 ++++++++++ config/experimental/auth/oidc/oidc_test.go | 106 +++++++++++++++++++++ config/id_token_source_github_oidc.go | 5 +- config/id_token_source_github_oidc_test.go | 5 +- config/token_source_strategy.go | 12 --- 10 files changed, 213 insertions(+), 59 deletions(-) create mode 100644 config/experimental/auth/oidc/oidc.go create mode 100644 config/experimental/auth/oidc/oidc_test.go diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index b4585a98d..339656483 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -4,6 +4,9 @@ ### New Features and Improvements +- Add support for OIDC ID token authentication using an environment variable + ([PR #1215](https://github.com/databricks/databricks-sdk-go/pull/1215)). + ### Bug Fixes ### Documentation diff --git a/config/auth_databricks_oidc.go b/config/auth_databricks_oidc.go index 434eb49b1..7a5f35ef2 100644 --- a/config/auth_databricks_oidc.go +++ b/config/auth_databricks_oidc.go @@ -6,6 +6,7 @@ import ( "net/url" "github.com/databricks/databricks-sdk-go/config/experimental/auth" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/credentials/u2m" "github.com/databricks/databricks-sdk-go/logger" "golang.org/x/oauth2" @@ -35,7 +36,7 @@ type DatabricksOIDCTokenSourceConfig struct { // This is only used for Workspace level tokens. Audience string // IdTokenSource returns the IDToken to be used for the token exchange. 
- IdTokenSource IDTokenSource + IdTokenSource oidc.IDTokenSource } // databricksOIDCTokenSource is a auth.TokenSource which exchanges a token using diff --git a/config/auth_databricks_oidc_test.go b/config/auth_databricks_oidc_test.go index 388766e14..64077bfab 100644 --- a/config/auth_databricks_oidc_test.go +++ b/config/auth_databricks_oidc_test.go @@ -7,25 +7,13 @@ import ( "net/url" "testing" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/credentials/u2m" "github.com/databricks/databricks-sdk-go/httpclient/fixtures" "github.com/google/go-cmp/cmp" "golang.org/x/oauth2" ) -type mockIdTokenProvider struct { - // input - audience string - // output - idToken string - err error -} - -func (m *mockIdTokenProvider) IDToken(ctx context.Context, audience string) (*IDToken, error) { - m.audience = audience - return &IDToken{Value: m.idToken}, m.err -} - func TestDatabricksOidcTokenSource(t *testing.T) { testCases := []struct { desc string @@ -36,7 +24,7 @@ func TestDatabricksOidcTokenSource(t *testing.T) { httpTransport http.RoundTripper oidcEndpointProvider func(context.Context) (*u2m.OAuthAuthorizationServer, error) idToken string - expectedAudience string + wantAudience string tokenProviderError error wantToken string wantErrPrefix *string @@ -64,7 +52,7 @@ func TestDatabricksOidcTokenSource(t *testing.T) { TokenEndpoint: "https://host.com/oidc/v1/token", }, nil }, - expectedAudience: "token-audience", + wantAudience: "token-audience", tokenProviderError: errors.New("error getting id token"), wantErrPrefix: errPrefix("error getting id token"), }, @@ -86,9 +74,9 @@ func TestDatabricksOidcTokenSource(t *testing.T) { }, }, }, - expectedAudience: "token-audience", - idToken: "id-token-42", - wantErrPrefix: errPrefix("oauth2: cannot fetch token: Internal Server Error"), + wantAudience: "token-audience", + idToken: "id-token-42", + wantErrPrefix: errPrefix("oauth2: cannot fetch token: Internal Server 
Error"), }, { desc: "invalid auth token", @@ -111,9 +99,9 @@ func TestDatabricksOidcTokenSource(t *testing.T) { }, }, }, - expectedAudience: "token-audience", - idToken: "id-token-42", - wantErrPrefix: errPrefix("oauth2: server response missing access_token"), + wantAudience: "token-audience", + idToken: "id-token-42", + wantErrPrefix: errPrefix("oauth2: server response missing access_token"), }, { desc: "success workspace", @@ -147,9 +135,9 @@ func TestDatabricksOidcTokenSource(t *testing.T) { }, }, }, - expectedAudience: "token-audience", - idToken: "id-token-42", - wantToken: "test-auth-token", + wantAudience: "token-audience", + idToken: "id-token-42", + wantToken: "test-auth-token", }, { desc: "success account", @@ -183,9 +171,9 @@ func TestDatabricksOidcTokenSource(t *testing.T) { }, }, }, - expectedAudience: "token-audience", - idToken: "id-token-42", - wantToken: "test-auth-token", + wantAudience: "token-audience", + idToken: "id-token-42", + wantToken: "test-auth-token", }, { desc: "default token audience account", @@ -211,9 +199,9 @@ func TestDatabricksOidcTokenSource(t *testing.T) { }, }, }, - expectedAudience: "ac123", - idToken: "id-token-42", - wantToken: "test-auth-token", + wantAudience: "ac123", + idToken: "id-token-42", + wantToken: "test-auth-token", }, { desc: "default token audience workspace", @@ -238,26 +226,25 @@ func TestDatabricksOidcTokenSource(t *testing.T) { }, }, }, - expectedAudience: "https://host.com/oidc/v1/token", - idToken: "id-token-42", - wantToken: "test-auth-token", + wantAudience: "https://host.com/oidc/v1/token", + idToken: "id-token-42", + wantToken: "test-auth-token", }, } for _, tc := range testCases { t.Run(tc.desc, func(t *testing.T) { - p := &mockIdTokenProvider{ - idToken: tc.idToken, - err: tc.tokenProviderError, - } - + var gotAudience string // set when IDTokenSource is called cfg := DatabricksOIDCTokenSourceConfig{ ClientID: tc.clientID, AccountID: tc.accountID, Host: tc.host, TokenEndpointProvider: 
tc.oidcEndpointProvider, Audience: tc.tokenAudience, - IdTokenSource: p, + IdTokenSource: oidc.IDTokenSourceFn(func(ctx context.Context, aud string) (*oidc.IDToken, error) { + gotAudience = aud + return &oidc.IDToken{Value: tc.idToken}, tc.tokenProviderError + }), } ts := NewDatabricksOIDCTokenSource(cfg) @@ -283,8 +270,8 @@ func TestDatabricksOidcTokenSource(t *testing.T) { if tc.wantErrPrefix != nil && !hasPrefix(err, *tc.wantErrPrefix) { t.Errorf("Token(ctx): got error %q, want error with prefix %q", err, *tc.wantErrPrefix) } - if tc.expectedAudience != p.audience { - t.Errorf("mockTokenProvider: got audience %s, want %s", p.audience, tc.expectedAudience) + if tc.wantAudience != gotAudience { + t.Errorf("mockTokenProvider: got audience %s, want %s", gotAudience, tc.wantAudience) } tokenValue := "" if token != nil { diff --git a/config/auth_default.go b/config/auth_default.go index a49757d02..15083ec02 100644 --- a/config/auth_default.go +++ b/config/auth_default.go @@ -6,6 +6,7 @@ import ( "fmt" "github.com/databricks/databricks-sdk-go/config/credentials" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/logger" ) @@ -13,9 +14,21 @@ import ( func buildOidcTokenCredentialStrategies(cfg *Config) []CredentialsStrategy { type namedIdTokenSource struct { name string - tokenSource IDTokenSource + tokenSource oidc.IDTokenSource } idTokenSources := []namedIdTokenSource{ + { + name: "env-oidc", + // If the OIDCTokenEnv is not set, use DATABRICKS_OIDC_TOKEN as + // default value. 
+ tokenSource: func() oidc.IDTokenSource { + v := cfg.OIDCTokenEnv + if v == "" { + v = "DATABRICKS_OIDC_TOKEN" + } + return oidc.NewEnvIDTokenSource(v) + }(), + }, { name: "github-oidc", tokenSource: &githubIDTokenSource{ @@ -26,6 +39,7 @@ func buildOidcTokenCredentialStrategies(cfg *Config) []CredentialsStrategy { }, // Add new providers at the end of the list } + strategies := []CredentialsStrategy{} for _, idTokenSource := range idTokenSources { oidcConfig := DatabricksOIDCTokenSourceConfig{ diff --git a/config/config.go b/config/config.go index c81f03610..912fac5c0 100644 --- a/config/config.go +++ b/config/config.go @@ -108,6 +108,9 @@ type Config struct { // specified by this argument. This argument also holds currently selected auth. AuthType string `name:"auth_type" env:"DATABRICKS_AUTH_TYPE" auth:"-"` + // Environment variable name that contains an OIDC ID token. + OIDCTokenEnv string `name:"oidc_token_env" env:"DATABRICKS_OIDC_TOKEN_ENV" auth:"-"` + // Skip SSL certificate verification for HTTP calls. // Use at your own risk or for unit testing purposes. InsecureSkipVerify bool `name:"skip_verify" auth:"-"` diff --git a/config/experimental/auth/oidc/oidc.go b/config/experimental/auth/oidc/oidc.go new file mode 100644 index 000000000..ff671b8dc --- /dev/null +++ b/config/experimental/auth/oidc/oidc.go @@ -0,0 +1,50 @@ +// Package oidc provides utilities for working with OIDC ID tokens. +// +// This package is experimental and subject to change. +package oidc + +import ( + "context" + "fmt" + "os" +) + +// IDToken represents an OIDC ID token that can be exchanged for a Databricks +// access token. +type IDToken struct { + Value string +} + +// IDTokenSource is anything that returns an IDToken given an audience. +type IDTokenSource interface { + IDToken(ctx context.Context, audience string) (*IDToken, error) +} + +// IDTokenSourceFn is an adapter to allow the use of ordinary functions as +// IDTokenSource. 
+// +// Example: +// +// ts := IDTokenSourceFn(func(ctx context.Context, aud string) (*IDToken, error) { +// return &IDToken{}, nil +// }) +type IDTokenSourceFn func(ctx context.Context, audience string) (*IDToken, error) + +func (fn IDTokenSourceFn) IDToken(ctx context.Context, audience string) (*IDToken, error) { + return fn(ctx, audience) +} + +// NewEnvIDTokenSource returns an IDTokenSource that reads the token from +// environment variable env. +// +// Note that the IDTokenSource does not cache the token and will read the token +// from environment variable env each time. +func NewEnvIDTokenSource(env string) IDTokenSource { + return IDTokenSourceFn(func(ctx context.Context, _ string) (*IDToken, error) { + t := os.Getenv(env) + if t == "" { + return nil, fmt.Errorf("missing env var %q", env) + } + return &IDToken{Value: t}, nil + }) +} diff --git a/config/experimental/auth/oidc/oidc_test.go b/config/experimental/auth/oidc/oidc_test.go new file mode 100644 index 000000000..9c34174cc --- /dev/null +++ b/config/experimental/auth/oidc/oidc_test.go @@ -0,0 +1,106 @@ +package oidc + +import ( + "context" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" +) + +func TestIDTokenSourceFn(t *testing.T) { + wantToken := &IDToken{Value: "from-func"} + wantErr := fmt.Errorf("test error") + wantAud := "func-audience" + wantCtx := context.Background() + + ts := IDTokenSourceFn(func(gotCtx context.Context, gotAud string) (*IDToken, error) { + if gotCtx != wantCtx { + t.Errorf("unexpected context: got %v, want %v", gotCtx, wantCtx) + } + if gotAud != wantAud { + t.Errorf("unexpected audience: got %q, want %q", gotAud, wantAud) + } + return wantToken, wantErr + }) + + gotToken, gotErr := ts.IDToken(wantCtx, wantAud) + + if gotErr != wantErr { + t.Errorf("IDToken() want error: %v, got error: %v", wantErr, gotErr) + } + if !cmp.Equal(gotToken, wantToken) { + t.Errorf("IDToken() token = %v, want %v", gotToken, wantToken) + } +} + +func TestNewEnvIDTokenSource(t *testing.T) { + 
testCases := []struct { + desc string + envName string + envValue string + audience string + want *IDToken + wantErr bool + }{ + { + desc: "Success - variable set", + envName: "OIDC_TEST_TOKEN_SUCCESS", + envValue: "test-token-123", + audience: "test-audience-1", + want: &IDToken{Value: "test-token-123"}, + wantErr: false, + }, + { + desc: "Failure - variable not set", + envName: "OIDC_TEST_TOKEN_MISSING", + envValue: "", + audience: "test-audience-2", + want: nil, + wantErr: true, + }, + { + desc: "Failure - variable set to empty string", + envName: "OIDC_TEST_TOKEN_EMPTY", + envValue: "", + audience: "test-audience-3", + want: nil, + wantErr: true, + }, + { + desc: "Success - different variable name", + envName: "ANOTHER_OIDC_TOKEN", + envValue: "another-token-456", + audience: "test-audience-4", + want: &IDToken{Value: "another-token-456"}, + wantErr: false, + }, + { + desc: "Success - empty audience string", + envName: "OIDC_TEST_TOKEN_NO_AUDIENCE", + envValue: "token-no-audience", + audience: "", + want: &IDToken{Value: "token-no-audience"}, + wantErr: false, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + t.Setenv(tc.envName, tc.envValue) + + ts := NewEnvIDTokenSource(tc.envName) + got, gotErr := ts.IDToken(context.Background(), tc.audience) + + if tc.wantErr && gotErr == nil { + t.Fatalf("IDToken() want error, got none") + } + if !tc.wantErr && gotErr != nil { + t.Fatalf("IDToken() want no error, got error: %v", gotErr) + } + if !cmp.Equal(got, tc.want) { + t.Errorf("IDToken() token = %v, want %v", got, tc.want) + } + }) + } +} diff --git a/config/id_token_source_github_oidc.go b/config/id_token_source_github_oidc.go index 6f4048226..93b9bb11c 100644 --- a/config/id_token_source_github_oidc.go +++ b/config/id_token_source_github_oidc.go @@ -5,6 +5,7 @@ import ( "errors" "fmt" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/httpclient" 
"github.com/databricks/databricks-sdk-go/logger" ) @@ -18,7 +19,7 @@ type githubIDTokenSource struct { // IDToken returns a JWT Token for the specified audience. It will return // an error if not running in GitHub Actions. -func (g *githubIDTokenSource) IDToken(ctx context.Context, audience string) (*IDToken, error) { +func (g *githubIDTokenSource) IDToken(ctx context.Context, audience string) (*oidc.IDToken, error) { if g.actionsIDTokenRequestURL == "" { logger.Debugf(ctx, "Missing ActionsIDTokenRequestURL, likely not calling from a Github action") return nil, errors.New("missing ActionsIDTokenRequestURL") @@ -28,7 +29,7 @@ func (g *githubIDTokenSource) IDToken(ctx context.Context, audience string) (*ID return nil, errors.New("missing ActionsIDTokenRequestToken") } - resp := &IDToken{} + resp := &oidc.IDToken{} requestUrl := g.actionsIDTokenRequestURL if audience != "" { requestUrl = fmt.Sprintf("%s&audience=%s", requestUrl, audience) diff --git a/config/id_token_source_github_oidc_test.go b/config/id_token_source_github_oidc_test.go index 58a1bbc2b..af71858d3 100644 --- a/config/id_token_source_github_oidc_test.go +++ b/config/id_token_source_github_oidc_test.go @@ -5,6 +5,7 @@ import ( "net/http" "testing" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/httpclient" "github.com/databricks/databricks-sdk-go/httpclient/fixtures" "github.com/google/go-cmp/cmp" @@ -17,7 +18,7 @@ func TestGithubIDTokenSource(t *testing.T) { tokenRequestToken string audience string httpTransport http.RoundTripper - wantToken *IDToken + wantToken *oidc.IDToken wantErrPrefix *string }{ { @@ -59,7 +60,7 @@ func TestGithubIDTokenSource(t *testing.T) { Response: `{"value": "id-token-42"}`, }, }, - wantToken: &IDToken{ + wantToken: &oidc.IDToken{ Value: "id-token-42", }, }, diff --git a/config/token_source_strategy.go b/config/token_source_strategy.go index fd5d995ce..45393ca31 100644 --- a/config/token_source_strategy.go +++ 
b/config/token_source_strategy.go @@ -10,18 +10,6 @@ import ( "github.com/databricks/databricks-sdk-go/logger" ) -// IDToken is a token that can be exchanged for a an access token. -// Value is the token string. -type IDToken struct { - Value string -} - -// IDTokenSource is anything that returns an IDToken given an audience. -type IDTokenSource interface { - // Function to get the token - IDToken(ctx context.Context, audience string) (*IDToken, error) -} - // Creates a CredentialsStrategy from a TokenSource. func NewTokenSourceStrategy( name string, From 49996cc24dea6723969b1158932566ed66a2ad55 Mon Sep 17 00:00:00 2001 From: hectorcast-db Date: Tue, 6 May 2025 20:41:48 +0200 Subject: [PATCH 52/54] Add support to load OIDC ID Tokens from a file (#1213) ## What changes are proposed in this pull request? Add support to load OIDC ID Tokens from a file. This adds support for arbitrary ID Tokens in exchange for delegating to the user the responsibility of minting and refreshing the token. ## How is this tested? Added unit test. --------- Co-authored-by: Renaud Hartert --- NEXT_CHANGELOG.md | 2 + config/auth_default.go | 4 + config/config.go | 5 +- config/experimental/auth/oidc/oidc.go | 23 ++++- config/experimental/auth/oidc/oidc_test.go | 98 ++++++++++++++++++---- 5 files changed, 112 insertions(+), 20 deletions(-) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 339656483..4cfcfa540 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -4,6 +4,8 @@ ### New Features and Improvements +- Add support for OIDC ID token authentication using a file + ([PR #1213](https://github.com/databricks/databricks-sdk-go/pull/1213)). - Add support for OIDC ID token authentication using an environment variable ([PR #1215](https://github.com/databricks/databricks-sdk-go/pull/1215)).
diff --git a/config/auth_default.go b/config/auth_default.go index 15083ec02..a53665bc4 100644 --- a/config/auth_default.go +++ b/config/auth_default.go @@ -29,6 +29,10 @@ func buildOidcTokenCredentialStrategies(cfg *Config) []CredentialsStrategy { return oidc.NewEnvIDTokenSource(v) }(), }, + { + name: "file-oidc", + tokenSource: oidc.NewFileTokenSource(cfg.OIDCTokenFilepath), + }, { name: "github-oidc", tokenSource: &githubIDTokenSource{ diff --git a/config/config.go b/config/config.go index 912fac5c0..a31508517 100644 --- a/config/config.go +++ b/config/config.go @@ -108,8 +108,11 @@ type Config struct { // specified by this argument. This argument also holds currently selected auth. AuthType string `name:"auth_type" env:"DATABRICKS_AUTH_TYPE" auth:"-"` + // Path to the file containing an OIDC ID token. + OIDCTokenFilepath string `name:"databricks_id_token_filepath" env:"DATABRICKS_OIDC_TOKEN_FILEPATH" auth:"file-oidc"` + // Environment variable name that contains an OIDC ID token. - OIDCTokenEnv string `name:"oidc_token_env" env:"DATABRICKS_OIDC_TOKEN_ENV" auth:"-"` + OIDCTokenEnv string `name:"oidc_token_env" env:"DATABRICKS_OIDC_TOKEN_ENV" auth:"env-oidc"` // Skip SSL certificate verification for HTTP calls. // Use at your own risk or for unit testing purposes. diff --git a/config/experimental/auth/oidc/oidc.go b/config/experimental/auth/oidc/oidc.go index ff671b8dc..b2f35bf54 100644 --- a/config/experimental/auth/oidc/oidc.go +++ b/config/experimental/auth/oidc/oidc.go @@ -34,7 +34,7 @@ func (fn IDTokenSourceFn) IDToken(ctx context.Context, audience string) (*IDToke return fn(ctx, audience) } -// NewEnvIDTokenSource returns an IDTokenSource that reads the token from +// NewEnvIDTokenSource returns an IDTokenSource that reads the IDtoken from // environment variable env. 
// // Note that the IDTokenSource does not cache the token and will read the token @@ -48,3 +48,24 @@ func NewEnvIDTokenSource(env string) IDTokenSource { return &IDToken{Value: t}, nil }) } + +// NewFileTokenSource returns an IDTokenSource that reads the ID token from a +// file. The file should contain a single line with the token. +func NewFileTokenSource(path string) IDTokenSource { + return IDTokenSourceFn(func(ctx context.Context, _ string) (*IDToken, error) { + if path == "" { + return nil, fmt.Errorf("missing path") + } + t, err := os.ReadFile(path) + if err != nil { + if os.IsNotExist(err) { + return nil, fmt.Errorf("file %q does not exist", path) + } + return nil, err + } + if len(t) == 0 { + return nil, fmt.Errorf("file %q is empty", path) + } + return &IDToken{Value: string(t)}, nil + }) +} diff --git a/config/experimental/auth/oidc/oidc_test.go b/config/experimental/auth/oidc/oidc_test.go index 9c34174cc..0949aac8b 100644 --- a/config/experimental/auth/oidc/oidc_test.go +++ b/config/experimental/auth/oidc/oidc_test.go @@ -3,6 +3,8 @@ package oidc import ( "context" "fmt" + "os" + "path/filepath" "testing" "github.com/google/go-cmp/cmp" @@ -39,50 +41,37 @@ func TestNewEnvIDTokenSource(t *testing.T) { desc string envName string envValue string - audience string want *IDToken wantErr bool }{ { - desc: "Success - variable set", + desc: "success", envName: "OIDC_TEST_TOKEN_SUCCESS", envValue: "test-token-123", - audience: "test-audience-1", want: &IDToken{Value: "test-token-123"}, wantErr: false, }, { - desc: "Failure - variable not set", + desc: "missing env var", envName: "OIDC_TEST_TOKEN_MISSING", envValue: "", - audience: "test-audience-2", want: nil, wantErr: true, }, { - desc: "Failure - variable set to empty string", + desc: "empty env var", envName: "OIDC_TEST_TOKEN_EMPTY", envValue: "", - audience: "test-audience-3", want: nil, wantErr: true, }, { - desc: "Success - different variable name", + desc: "different variable name", envName: 
"ANOTHER_OIDC_TOKEN", envValue: "another-token-456", - audience: "test-audience-4", want: &IDToken{Value: "another-token-456"}, wantErr: false, }, - { - desc: "Success - empty audience string", - envName: "OIDC_TEST_TOKEN_NO_AUDIENCE", - envValue: "token-no-audience", - audience: "", - want: &IDToken{Value: "token-no-audience"}, - wantErr: false, - }, } for _, tc := range testCases { @@ -90,7 +79,80 @@ func TestNewEnvIDTokenSource(t *testing.T) { t.Setenv(tc.envName, tc.envValue) ts := NewEnvIDTokenSource(tc.envName) - got, gotErr := ts.IDToken(context.Background(), tc.audience) + got, gotErr := ts.IDToken(context.Background(), "") + + if tc.wantErr && gotErr == nil { + t.Fatalf("IDToken() want error, got none") + } + if !tc.wantErr && gotErr != nil { + t.Fatalf("IDToken() want no error, got error: %v", gotErr) + } + if !cmp.Equal(got, tc.want) { + t.Errorf("IDToken() token = %v, want %v", got, tc.want) + } + }) + } +} + +type testFile struct { + filename string + filecontent string +} + +func TestNewFileTokenSource(t *testing.T) { + testCases := []struct { + desc string + file *testFile // file to create + filepath string + want *IDToken + wantErr bool + }{ + { + desc: "missing filepath", + file: &testFile{filename: "token", filecontent: "content"}, + filepath: "", + wantErr: true, + }, + { + desc: "empty file", + file: &testFile{filename: "token", filecontent: ""}, + filepath: "token", + wantErr: true, + }, + { + desc: "file does not exist", + filepath: "nonexistent-file", + wantErr: true, + }, + { + desc: "file exists", + file: &testFile{filename: "token", filecontent: "content"}, + filepath: "token", + want: &IDToken{Value: "content"}, + }, + } + + for _, tc := range testCases { + t.Run(tc.desc, func(t *testing.T) { + tmpDir := t.TempDir() + + // Create the test file if any is given. 
+ if tc.file != nil { + tokenFile := filepath.Join(tmpDir, tc.file.filename) + if err := os.WriteFile(tokenFile, []byte(tc.file.filecontent), 0644); err != nil { + t.Fatalf("failed to create token file: %v", err) + } + } + + // Only compute the fully qualified filepath if the relative + // filepath is given. + fp := tc.filepath + if tc.filepath != "" { + fp = filepath.Join(tmpDir, tc.filepath) + } + + ts := NewFileTokenSource(fp) + got, gotErr := ts.IDToken(context.Background(), "") if tc.wantErr && gotErr == nil { t.Fatalf("IDToken() want error, got none") From 4f0fe8782d4f09bf3929b00b8fedb70895bc30cb Mon Sep 17 00:00:00 2001 From: "deco-sdk-tagging[bot]" <192229699+deco-sdk-tagging[bot]@users.noreply.github.com> Date: Tue, 6 May 2025 19:23:32 +0000 Subject: [PATCH 53/54] [Release] Release v0.68.0 ## Release v0.68.0 ### New Features and Improvements - Add support for OIDC ID token authentication using a file ([PR #1213](https://github.com/databricks/databricks-sdk-go/pull/1213)). - Add support for OIDC ID token authentication using an environment variable ([PR #1215](https://github.com/databricks/databricks-sdk-go/pull/1215)). --- .release_metadata.json | 2 +- CHANGELOG.md | 10 ++++++++++ NEXT_CHANGELOG.md | 7 +------ version/version.go | 2 +- 4 files changed, 13 insertions(+), 8 deletions(-) diff --git a/.release_metadata.json b/.release_metadata.json index 7dc90fcb0..97831224e 100644 --- a/.release_metadata.json +++ b/.release_metadata.json @@ -1,3 +1,3 @@ { - "timestamp": "2025-05-02 16:25:19+0000" + "timestamp": "2025-05-06 19:23:28+0000" } \ No newline at end of file diff --git a/CHANGELOG.md b/CHANGELOG.md index 849ebab53..a856901b2 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,15 @@ # Version changelog +## Release v0.68.0 + +### New Features and Improvements + +- Add support for OIDC ID token authentication using a file + ([PR #1213](https://github.com/databricks/databricks-sdk-go/pull/1213)). 
+- Add support for OIDC ID token authentication using an environment variable + ([PR #1215](https://github.com/databricks/databricks-sdk-go/pull/1215)). + + ## Release v0.67.0 ### Bug Fixes diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 4cfcfa540..9c0031211 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -1,14 +1,9 @@ # NEXT CHANGELOG -## Release v0.68.0 +## Release v0.69.0 ### New Features and Improvements -- Add support for OIDC ID token authentication using a file - ([PR #1213](https://github.com/databricks/databricks-sdk-go/pull/1213)). -- Add support for OIDC ID token authentication using an environment variable - ([PR #1215](https://github.com/databricks/databricks-sdk-go/pull/1215)). - ### Bug Fixes ### Documentation diff --git a/version/version.go b/version/version.go index 724a984b6..4f296e86a 100644 --- a/version/version.go +++ b/version/version.go @@ -1,4 +1,4 @@ package version // Version of the SDK, updated manually before every tag -const Version = "0.67.0" +const Version = "0.68.0" From a2a34b9b7832af018b840a23e7c713ffe313ddad Mon Sep 17 00:00:00 2001 From: Renaud Hartert Date: Fri, 9 May 2025 14:34:05 +0200 Subject: [PATCH 54/54] Add support to authenticate with Account-wide token federation (#1219) ## What changes are proposed in this pull request? This PR adds support to authenticate with [Account-wide token federation](https://docs.databricks.com/aws/en/dev-tools/auth/oauth-federation#account-wide-token-federation) from the following auth methods: `env-oidc`, `file-oidc`, and `github-oidc`. The PR also slightly re-organizes the code by moving the OIDC token source and Github IDTokenSource into the `oidc` package. ## How is this tested? Unit test + local validation. 
--- NEXT_CHANGELOG.md | 3 + config/auth_azure_github_oidc.go | 10 ++-- config/auth_default.go | 16 ++--- .../auth/oidc/github.go} | 18 ++++-- .../auth/oidc/github_test.go} | 7 +-- .../auth/oidc/tokensource.go} | 56 ++++++++++------- .../auth/oidc/tokensource_test.go} | 60 +++++++++++++++---- 7 files changed, 116 insertions(+), 54 deletions(-) rename config/{id_token_source_github_oidc.go => experimental/auth/oidc/github.go} (70%) rename config/{id_token_source_github_oidc_test.go => experimental/auth/oidc/github_test.go} (94%) rename config/{auth_databricks_oidc.go => experimental/auth/oidc/tokensource.go} (67%) rename config/{auth_databricks_oidc_test.go => experimental/auth/oidc/tokensource_test.go} (85%) diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index 9c0031211..fa2120b59 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -4,6 +4,9 @@ ### New Features and Improvements +- Add support to authenticate with Account-wide token federation from the + following auth methods: `env-oidc`, `file-oidc`, and `github-oidc`. 
+ ### Bug Fixes ### Documentation diff --git a/config/auth_azure_github_oidc.go b/config/auth_azure_github_oidc.go index 7be69563f..8120f3b7b 100644 --- a/config/auth_azure_github_oidc.go +++ b/config/auth_azure_github_oidc.go @@ -7,6 +7,7 @@ import ( "time" "github.com/databricks/databricks-sdk-go/config/credentials" + "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/httpclient" "golang.org/x/oauth2" ) @@ -26,10 +27,11 @@ func (c AzureGithubOIDCCredentials) Configure(ctx context.Context, cfg *Config) if !cfg.IsAzure() || cfg.AzureClientID == "" || cfg.Host == "" || cfg.AzureTenantID == "" || cfg.ActionsIDTokenRequestURL == "" || cfg.ActionsIDTokenRequestToken == "" { return nil, nil } - supplier := githubIDTokenSource{actionsIDTokenRequestURL: cfg.ActionsIDTokenRequestURL, - actionsIDTokenRequestToken: cfg.ActionsIDTokenRequestToken, - refreshClient: cfg.refreshClient, - } + supplier := oidc.NewGithubIDTokenSource( + cfg.refreshClient, + cfg.ActionsIDTokenRequestURL, + cfg.ActionsIDTokenRequestToken, + ) idToken, err := supplier.IDToken(ctx, "api://AzureADTokenExchange") if err != nil { diff --git a/config/auth_default.go b/config/auth_default.go index a53665bc4..e2be578fe 100644 --- a/config/auth_default.go +++ b/config/auth_default.go @@ -35,28 +35,28 @@ func buildOidcTokenCredentialStrategies(cfg *Config) []CredentialsStrategy { }, { name: "github-oidc", - tokenSource: &githubIDTokenSource{ - actionsIDTokenRequestURL: cfg.ActionsIDTokenRequestURL, - actionsIDTokenRequestToken: cfg.ActionsIDTokenRequestToken, - refreshClient: cfg.refreshClient, - }, + tokenSource: oidc.NewGithubIDTokenSource( + cfg.refreshClient, + cfg.ActionsIDTokenRequestURL, + cfg.ActionsIDTokenRequestToken, + ), }, // Add new providers at the end of the list } strategies := []CredentialsStrategy{} for _, idTokenSource := range idTokenSources { - oidcConfig := DatabricksOIDCTokenSourceConfig{ + oidcConfig := 
oidc.DatabricksOIDCTokenSourceConfig{ ClientID: cfg.ClientID, Host: cfg.CanonicalHostName(), TokenEndpointProvider: cfg.getOidcEndpoints, Audience: cfg.TokenAudience, - IdTokenSource: idTokenSource.tokenSource, + IDTokenSource: idTokenSource.tokenSource, } if cfg.IsAccountClient() { oidcConfig.AccountID = cfg.AccountID } - tokenSource := NewDatabricksOIDCTokenSource(oidcConfig) + tokenSource := oidc.NewDatabricksOIDCTokenSource(oidcConfig) strategies = append(strategies, NewTokenSourceStrategy(idTokenSource.name, tokenSource)) } return strategies diff --git a/config/id_token_source_github_oidc.go b/config/experimental/auth/oidc/github.go similarity index 70% rename from config/id_token_source_github_oidc.go rename to config/experimental/auth/oidc/github.go index 93b9bb11c..5e7cfc961 100644 --- a/config/id_token_source_github_oidc.go +++ b/config/experimental/auth/oidc/github.go @@ -1,15 +1,25 @@ -package config +package oidc import ( "context" "errors" "fmt" - "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/httpclient" "github.com/databricks/databricks-sdk-go/logger" ) +// NewGithubIDTokenSource returns a new IDTokenSource that retrieves an IDToken +// from the Github Actions environment. This IDTokenSource is only valid when +// running in Github Actions with OIDC enabled. +func NewGithubIDTokenSource(client *httpclient.ApiClient, actionsIDTokenRequestURL, actionsIDTokenRequestToken string) IDTokenSource { + return &githubIDTokenSource{ + actionsIDTokenRequestURL: actionsIDTokenRequestURL, + actionsIDTokenRequestToken: actionsIDTokenRequestToken, + refreshClient: client, + } +} + // githubIDTokenSource retrieves JWT Tokens from Github Actions. type githubIDTokenSource struct { actionsIDTokenRequestURL string @@ -19,7 +29,7 @@ type githubIDTokenSource struct { // IDToken returns a JWT Token for the specified audience. It will return // an error if not running in GitHub Actions. 
-func (g *githubIDTokenSource) IDToken(ctx context.Context, audience string) (*oidc.IDToken, error) { +func (g *githubIDTokenSource) IDToken(ctx context.Context, audience string) (*IDToken, error) { if g.actionsIDTokenRequestURL == "" { logger.Debugf(ctx, "Missing ActionsIDTokenRequestURL, likely not calling from a Github action") return nil, errors.New("missing ActionsIDTokenRequestURL") @@ -29,7 +39,7 @@ func (g *githubIDTokenSource) IDToken(ctx context.Context, audience string) (*oi return nil, errors.New("missing ActionsIDTokenRequestToken") } - resp := &oidc.IDToken{} + resp := &IDToken{} requestUrl := g.actionsIDTokenRequestURL if audience != "" { requestUrl = fmt.Sprintf("%s&audience=%s", requestUrl, audience) diff --git a/config/id_token_source_github_oidc_test.go b/config/experimental/auth/oidc/github_test.go similarity index 94% rename from config/id_token_source_github_oidc_test.go rename to config/experimental/auth/oidc/github_test.go index af71858d3..33a1fb213 100644 --- a/config/id_token_source_github_oidc_test.go +++ b/config/experimental/auth/oidc/github_test.go @@ -1,11 +1,10 @@ -package config +package oidc import ( "context" "net/http" "testing" - "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/httpclient" "github.com/databricks/databricks-sdk-go/httpclient/fixtures" "github.com/google/go-cmp/cmp" @@ -18,7 +17,7 @@ func TestGithubIDTokenSource(t *testing.T) { tokenRequestToken string audience string httpTransport http.RoundTripper - wantToken *oidc.IDToken + wantToken *IDToken wantErrPrefix *string }{ { @@ -60,7 +59,7 @@ func TestGithubIDTokenSource(t *testing.T) { Response: `{"value": "id-token-42"}`, }, }, - wantToken: &oidc.IDToken{ + wantToken: &IDToken{ Value: "id-token-42", }, }, diff --git a/config/auth_databricks_oidc.go b/config/experimental/auth/oidc/tokensource.go similarity index 67% rename from config/auth_databricks_oidc.go rename to 
config/experimental/auth/oidc/tokensource.go index 7a5f35ef2..25eb1b6f4 100644 --- a/config/auth_databricks_oidc.go +++ b/config/experimental/auth/oidc/tokensource.go @@ -1,4 +1,4 @@ -package config +package oidc import ( "context" @@ -6,37 +6,44 @@ import ( "net/url" "github.com/databricks/databricks-sdk-go/config/experimental/auth" - "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/credentials/u2m" "github.com/databricks/databricks-sdk-go/logger" "golang.org/x/oauth2" "golang.org/x/oauth2/clientcredentials" ) -// Creates a new Databricks OIDC TokenSource. -func NewDatabricksOIDCTokenSource(cfg DatabricksOIDCTokenSourceConfig) auth.TokenSource { - return &databricksOIDCTokenSource{ - cfg: cfg, - } -} - -// Config for Databricks OIDC TokenSource. +// DatabricksOIDCTokenSourceConfig is the configuration for a Databricks OIDC +// TokenSource. type DatabricksOIDCTokenSourceConfig struct { - // ClientID is the client ID of the Databricks OIDC application. For - // Databricks Service Principal, this is the Application ID of the Service Principal. + // ClientID of the Databricks OIDC application. It corresponds to the + // Application ID of the Databricks Service Principal. + // + // This field is only required for Workload Identity Federation and should + // be empty for Account-wide token federation. ClientID string - // [Optional] AccountID is the account ID of the Databricks Account. - // This is only used for Account level tokens. + + // AccountID is the account ID of the Databricks Account. This field is + // only required for Account-wide token federation. AccountID string + // Host is the host of the Databricks account or workspace. Host string - // TokenEndpointProvider returns the token endpoint for the Databricks OIDC application. + + // TokenEndpointProvider returns the token endpoint for the Databricks OIDC + // application. 
TokenEndpointProvider func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) + // Audience is the audience of the Databricks OIDC application. // This is only used for Workspace level tokens. Audience string - // IdTokenSource returns the IDToken to be used for the token exchange. - IdTokenSource oidc.IDTokenSource + + // IDTokenSource returns the IDToken to be used for the token exchange. + IDTokenSource IDTokenSource +} + +// NewDatabricksOIDCTokenSource returns a new Databricks OIDC TokenSource. +func NewDatabricksOIDCTokenSource(cfg DatabricksOIDCTokenSourceConfig) auth.TokenSource { + return &databricksOIDCTokenSource{cfg: cfg} } // databricksOIDCTokenSource is a auth.TokenSource which exchanges a token using @@ -47,10 +54,6 @@ type databricksOIDCTokenSource struct { // Token implements [TokenSource.Token] func (w *databricksOIDCTokenSource) Token(ctx context.Context) (*oauth2.Token, error) { - if w.cfg.ClientID == "" { - logger.Debugf(ctx, "Missing ClientID") - return nil, errors.New("missing ClientID") - } if w.cfg.Host == "" { logger.Debugf(ctx, "Missing Host") return nil, errors.New("missing Host") @@ -59,8 +62,17 @@ func (w *databricksOIDCTokenSource) Token(ctx context.Context) (*oauth2.Token, e if err != nil { return nil, err } + + if w.cfg.ClientID == "" { + logger.Debugf(ctx, "No ClientID provided, authenticating with Account-wide token federation") + } else { + logger.Debugf(ctx, "Client ID provided, authenticating with Workload Identity Federation") + } + + // TODO: The audience is a concept of the IDToken that should likely be + // configured when the IDTokenSource is created. 
audience := w.determineAudience(endpoints) - idToken, err := w.cfg.IdTokenSource.IDToken(ctx, audience) + idToken, err := w.cfg.IDTokenSource.IDToken(ctx, audience) if err != nil { return nil, err } diff --git a/config/auth_databricks_oidc_test.go b/config/experimental/auth/oidc/tokensource_test.go similarity index 85% rename from config/auth_databricks_oidc_test.go rename to config/experimental/auth/oidc/tokensource_test.go index 64077bfab..3410978c6 100644 --- a/config/auth_databricks_oidc_test.go +++ b/config/experimental/auth/oidc/tokensource_test.go @@ -1,19 +1,27 @@ -package config +package oidc import ( "context" "errors" "net/http" "net/url" + "strings" "testing" - "github.com/databricks/databricks-sdk-go/config/experimental/auth/oidc" "github.com/databricks/databricks-sdk-go/credentials/u2m" "github.com/databricks/databricks-sdk-go/httpclient/fixtures" "github.com/google/go-cmp/cmp" "golang.org/x/oauth2" ) +func errPrefix(s string) *string { + return &s +} + +func hasPrefix(err error, prefix string) bool { + return strings.HasPrefix(err.Error(), prefix) +} + func TestDatabricksOidcTokenSource(t *testing.T) { testCases := []struct { desc string @@ -35,12 +43,6 @@ func TestDatabricksOidcTokenSource(t *testing.T) { tokenAudience: "token-audience", wantErrPrefix: errPrefix("missing Host"), }, - { - desc: "missing client ID", - host: "http://host.com", - tokenAudience: "token-audience", - wantErrPrefix: errPrefix("missing ClientID"), - }, { desc: "token provider error", @@ -104,7 +106,7 @@ func TestDatabricksOidcTokenSource(t *testing.T) { wantErrPrefix: errPrefix("oauth2: server response missing access_token"), }, { - desc: "success workspace", + desc: "success WIF workspace", clientID: "client-id", host: "http://host.com", tokenAudience: "token-audience", @@ -140,7 +142,7 @@ func TestDatabricksOidcTokenSource(t *testing.T) { wantToken: "test-auth-token", }, { - desc: "success account", + desc: "success WIF account", clientID: "client-id", accountID: "ac123", 
host: "https://accounts.databricks.com", @@ -230,6 +232,40 @@ func TestDatabricksOidcTokenSource(t *testing.T) { idToken: "id-token-42", wantToken: "test-auth-token", }, + { + desc: "success account-wide", + host: "http://host.com", + tokenAudience: "token-audience", + oidcEndpointProvider: func(ctx context.Context) (*u2m.OAuthAuthorizationServer, error) { + return &u2m.OAuthAuthorizationServer{ + TokenEndpoint: "https://host.com/oidc/v1/token", + }, nil + }, + httpTransport: fixtures.MappingTransport{ + "POST /oidc/v1/token": { + + Status: http.StatusOK, + ExpectedHeaders: map[string]string{ + "Content-Type": "application/x-www-form-urlencoded", + }, + ExpectedRequest: url.Values{ + "scope": {"all-apis"}, + "subject_token_type": {"urn:ietf:params:oauth:token-type:jwt"}, + "subject_token": {"id-token-42"}, + "grant_type": {"urn:ietf:params:oauth:grant-type:token-exchange"}, + }, + Response: map[string]string{ + "token_type": "access-token", + "access_token": "test-auth-token", + "refresh_token": "refresh", + "expires_on": "0", + }, + }, + }, + wantAudience: "token-audience", + idToken: "id-token-42", + wantToken: "test-auth-token", + }, } for _, tc := range testCases { @@ -241,9 +277,9 @@ func TestDatabricksOidcTokenSource(t *testing.T) { Host: tc.host, TokenEndpointProvider: tc.oidcEndpointProvider, Audience: tc.tokenAudience, - IdTokenSource: oidc.IDTokenSourceFn(func(ctx context.Context, aud string) (*oidc.IDToken, error) { + IDTokenSource: IDTokenSourceFn(func(ctx context.Context, aud string) (*IDToken, error) { gotAudience = aud - return &oidc.IDToken{Value: tc.idToken}, tc.tokenProviderError + return &IDToken{Value: tc.idToken}, tc.tokenProviderError }), }