diff --git a/.codegen/_openapi_sha b/.codegen/_openapi_sha index 26ece1bc5..8cd956362 100644 --- a/.codegen/_openapi_sha +++ b/.codegen/_openapi_sha @@ -1 +1 @@ -05692f4dcf168be190bb7bcda725ee8b368b7ae3 \ No newline at end of file +06a18b97d7996d6cd8dd88bfdb0f2c2792739e46 \ No newline at end of file diff --git a/.gitattributes b/.gitattributes index 137cc27b5..20de597f1 100755 --- a/.gitattributes +++ b/.gitattributes @@ -742,6 +742,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/LogSyncStat databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/MavenLibrary.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/NodeInstanceType.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/NodeType.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/NodeTypeFlexibility.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/PendingInstanceError.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/PermanentDeleteCluster.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/PermanentDeleteClusterResponse.java linguist-generated=true @@ -2504,7 +2505,7 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/sql/WidgetPosition. 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ColumnInfo.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateEndpoint.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexRequest.java linguist-generated=true -databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CustomTag.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataResult.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataVectorIndexRequest.java linguist-generated=true @@ -2531,6 +2532,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ListVa databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ListVectorIndexesResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/MapStringValueEntry.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/MiniVectorIndex.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PipelineType.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/QueryVectorIndexNextPageRequest.java linguist-generated=true 
databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/QueryVectorIndexRequest.java linguist-generated=true @@ -2542,6 +2545,8 @@ databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ScanVe databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/Struct.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/SyncIndexRequest.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/SyncIndexResponse.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpdateEndpointCustomTagsRequest.java linguist-generated=true +databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpdateEndpointCustomTagsResponse.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpsertDataResult.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpsertDataStatus.java linguist-generated=true databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpsertDataVectorIndexRequest.java linguist-generated=true diff --git a/NEXT_CHANGELOG.md b/NEXT_CHANGELOG.md index cc0f26322..590cacca6 100644 --- a/NEXT_CHANGELOG.md +++ b/NEXT_CHANGELOG.md @@ -11,3 +11,20 @@ ### Internal Changes ### API Changes +* Added `updateEndpointBudgetPolicy()` and `updateEndpointCustomTags()` methods for `workspaceClient.vectorSearchEndpoints()` service. +* Added `nodeTypeFlexibility` field for `com.databricks.sdk.service.compute.EditInstancePool`. +* Added `pageSize` and `pageToken` fields for `com.databricks.sdk.service.compute.GetEvents`. +* Added `nextPageToken` and `prevPageToken` fields for `com.databricks.sdk.service.compute.GetEventsResponse`. +* Added `nodeTypeFlexibility` field for `com.databricks.sdk.service.compute.GetInstancePool`. 
+* Added `nodeTypeFlexibility` field for `com.databricks.sdk.service.compute.InstancePoolAndStats`. +* Added `effectivePerformanceTarget` field for `com.databricks.sdk.service.jobs.RepairHistoryItem`. +* Added `performanceTarget` field for `com.databricks.sdk.service.jobs.RepairRun`. +* Added `budgetPolicyId` field for `com.databricks.sdk.service.vectorsearch.CreateEndpoint`. +* Added `customTags` and `effectiveBudgetPolicyId` fields for `com.databricks.sdk.service.vectorsearch.EndpointInfo`. +* Added `DISABLED` enum value for `com.databricks.sdk.service.jobs.TerminationCodeCode`. +* [Breaking] Changed `createIndex()` method for `workspaceClient.vectorSearchIndexes()` service to return `com.databricks.sdk.service.vectorsearch.VectorIndex` class. +* [Breaking] Changed `deleteDataVectorIndex()` method for `workspaceClient.vectorSearchIndexes()` service . HTTP method/verb has changed. +* [Breaking] Changed `deleteDataVectorIndex()` method for `workspaceClient.vectorSearchIndexes()` service with new required argument order. +* [Breaking] Changed `dataArray` field for `com.databricks.sdk.service.vectorsearch.ResultData` to type `com.databricks.sdk.service.vectorsearch.ListValueList` class. +* [Breaking] Changed waiter for `workspaceClient.vectorSearchEndpoints().createEndpoint()` method. +* [Breaking] Removed `nullValue` field for `com.databricks.sdk.service.vectorsearch.Value`. diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java index 547aa11f5..84bc0cd44 100755 --- a/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java +++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/WorkspaceClient.java @@ -1661,9 +1661,9 @@ public VectorSearchEndpointsAPI vectorSearchEndpoints() { * **Index**: An efficient representation of your embedding vectors that supports real-time and * efficient approximate nearest neighbor (ANN) search queries. * - *
<p>There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index that +
* <p>There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index that
* automatically syncs with a source Delta Table, automatically and incrementally updating the
- * index as the underlying data in the Delta Table changes. * **Direct Vector Access Index**: An
+ * index as the underlying data in the Delta Table changes. - **Direct Vector Access Index**: An
* index that supports direct read and write of vectors and metadata through our REST and SDK
* APIs. With this model, the user manages index updates.
*/
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/cleanrooms/CleanRoomAssetNotebook.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/cleanrooms/CleanRoomAssetNotebook.java
index 20673bbf9..097cb43c9 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/cleanrooms/CleanRoomAssetNotebook.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/cleanrooms/CleanRoomAssetNotebook.java
@@ -9,7 +9,7 @@
@Generated
public class CleanRoomAssetNotebook {
- /** Server generated checksum that represents the notebook version. */
+ /** Server generated etag that represents the notebook version. */
@JsonProperty("etag")
private String etag;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditInstancePool.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditInstancePool.java
index 5f1c2d218..d6daf72a8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditInstancePool.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/EditInstancePool.java
@@ -52,6 +52,13 @@ public class EditInstancePool {
@JsonProperty("min_idle_instances")
private Long minIdleInstances;
+ /**
+ * For Fleet-pool V2, this object contains the information about the alternate node type ids to
+ * use when attempting to launch a cluster if the node type id is not available.
+ */
+ @JsonProperty("node_type_flexibility")
+ private NodeTypeFlexibility nodeTypeFlexibility;
+
/**
* This field encodes, through a single value, the resources available to each of the Spark nodes
* in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -116,6 +123,15 @@ public Long getMinIdleInstances() {
return minIdleInstances;
}
+ public EditInstancePool setNodeTypeFlexibility(NodeTypeFlexibility nodeTypeFlexibility) {
+ this.nodeTypeFlexibility = nodeTypeFlexibility;
+ return this;
+ }
+
+ public NodeTypeFlexibility getNodeTypeFlexibility() {
+ return nodeTypeFlexibility;
+ }
+
public EditInstancePool setNodeTypeId(String nodeTypeId) {
this.nodeTypeId = nodeTypeId;
return this;
@@ -137,6 +153,7 @@ public boolean equals(Object o) {
&& Objects.equals(instancePoolName, that.instancePoolName)
&& Objects.equals(maxCapacity, that.maxCapacity)
&& Objects.equals(minIdleInstances, that.minIdleInstances)
+ && Objects.equals(nodeTypeFlexibility, that.nodeTypeFlexibility)
&& Objects.equals(nodeTypeId, that.nodeTypeId);
}
@@ -149,6 +166,7 @@ public int hashCode() {
instancePoolName,
maxCapacity,
minIdleInstances,
+ nodeTypeFlexibility,
nodeTypeId);
}
@@ -161,6 +179,7 @@ public String toString() {
.add("instancePoolName", instancePoolName)
.add("maxCapacity", maxCapacity)
.add("minIdleInstances", minIdleInstances)
+ .add("nodeTypeFlexibility", nodeTypeFlexibility)
.add("nodeTypeId", nodeTypeId)
.toString();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEvents.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEvents.java
index 2d6eda9cd..b9d5f385e 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEvents.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEvents.java
@@ -23,14 +23,18 @@ public class GetEvents {
private Collection<EventType> eventTypes; /** * The maximum number of events to include in a page of events. Defaults to 50, and maximum
* allowed value is 500.
*/
@JsonProperty("limit")
private Long limit;
/**
- * The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the
+ * Deprecated: use page_token in combination with page_size instead.
+ *
+ * <p>The offset in the result set. Defaults to 0 (no offset). When an offset is specified and the
* results are requested in descending order, the end_time field is required.
*/
@JsonProperty("offset")
@@ -40,6 +44,22 @@ public class GetEvents {
@JsonProperty("order")
private GetEventsOrder order;
+ /**
+ * The maximum number of events to include in a page of events. The server may further constrain
+ * the maximum number of results returned in a single page. If the page_size is empty or 0, the
+ * server will decide the number of results to be returned. The field has to be in the range
+ * [0,500]. If the value is outside the range, the server enforces 0 or 500.
+ */
+ @JsonProperty("page_size")
+ private Long pageSize;
+
+ /**
+ * Use next_page_token or prev_page_token returned from the previous request to list the next or
+ * previous page of events respectively. If page_token is empty, the first page is returned.
+ */
+ @JsonProperty("page_token")
+ private String pageToken;
+
/**
* The start time in epoch milliseconds. If empty, returns events starting from the beginning of
* time.
@@ -101,6 +121,24 @@ public GetEventsOrder getOrder() {
return order;
}
+ public GetEvents setPageSize(Long pageSize) {
+ this.pageSize = pageSize;
+ return this;
+ }
+
+ public Long getPageSize() {
+ return pageSize;
+ }
+
+ public GetEvents setPageToken(String pageToken) {
+ this.pageToken = pageToken;
+ return this;
+ }
+
+ public String getPageToken() {
+ return pageToken;
+ }
+
public GetEvents setStartTime(Long startTime) {
this.startTime = startTime;
return this;
@@ -121,12 +159,15 @@ public boolean equals(Object o) {
&& Objects.equals(limit, that.limit)
&& Objects.equals(offset, that.offset)
&& Objects.equals(order, that.order)
+ && Objects.equals(pageSize, that.pageSize)
+ && Objects.equals(pageToken, that.pageToken)
&& Objects.equals(startTime, that.startTime);
}
@Override
public int hashCode() {
- return Objects.hash(clusterId, endTime, eventTypes, limit, offset, order, startTime);
+ return Objects.hash(
+ clusterId, endTime, eventTypes, limit, offset, order, pageSize, pageToken, startTime);
}
@Override
@@ -138,6 +179,8 @@ public String toString() {
.add("limit", limit)
.add("offset", offset)
.add("order", order)
+ .add("pageSize", pageSize)
+ .add("pageToken", pageToken)
.add("startTime", startTime)
.toString();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEventsResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEventsResponse.java
index 7499839bb..0ffb98a2d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEventsResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetEventsResponse.java
@@ -15,13 +15,34 @@ public class GetEventsResponse {
private Collection<ClusterEvent> events; /** * The parameters required to retrieve the next page of events. Omitted if there are no more
* events to read.
*/
@JsonProperty("next_page")
private GetEvents nextPage;
- /** The total number of events filtered by the start_time, end_time, and event_types. */
+ /**
+ * This field represents the pagination token to retrieve the next page of results. If the value
+ * is "", it means no further results for the request.
+ */
+ @JsonProperty("next_page_token")
+ private String nextPageToken;
+
+ /**
+ * This field represents the pagination token to retrieve the previous page of results. If the
+ * value is "", it means no further results for the request.
+ */
+ @JsonProperty("prev_page_token")
+ private String prevPageToken;
+
+ /**
+ * Deprecated: Returns 0 when request uses page_token. Will start returning zero when request uses
+ * offset/limit soon.
+ *
+ * <p>The total number of events filtered by the start_time, end_time, and event_types.
+ */
@JsonProperty("total_count")
private Long totalCount;
@@ -43,6 +64,24 @@ public GetEvents getNextPage() {
return nextPage;
}
+ public GetEventsResponse setNextPageToken(String nextPageToken) {
+ this.nextPageToken = nextPageToken;
+ return this;
+ }
+
+ public String getNextPageToken() {
+ return nextPageToken;
+ }
+
+ public GetEventsResponse setPrevPageToken(String prevPageToken) {
+ this.prevPageToken = prevPageToken;
+ return this;
+ }
+
+ public String getPrevPageToken() {
+ return prevPageToken;
+ }
+
public GetEventsResponse setTotalCount(Long totalCount) {
this.totalCount = totalCount;
return this;
@@ -59,12 +98,14 @@ public boolean equals(Object o) {
GetEventsResponse that = (GetEventsResponse) o;
return Objects.equals(events, that.events)
&& Objects.equals(nextPage, that.nextPage)
+ && Objects.equals(nextPageToken, that.nextPageToken)
+ && Objects.equals(prevPageToken, that.prevPageToken)
&& Objects.equals(totalCount, that.totalCount);
}
@Override
public int hashCode() {
- return Objects.hash(events, nextPage, totalCount);
+ return Objects.hash(events, nextPage, nextPageToken, prevPageToken, totalCount);
}
@Override
@@ -72,6 +113,8 @@ public String toString() {
return new ToStringer(GetEventsResponse.class)
.add("events", events)
.add("nextPage", nextPage)
+ .add("nextPageToken", nextPageToken)
+ .add("prevPageToken", prevPageToken)
.add("totalCount", totalCount)
.toString();
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetInstancePool.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetInstancePool.java
index 97feb90b6..13f105576 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetInstancePool.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/GetInstancePool.java
@@ -101,6 +101,13 @@ public class GetInstancePool {
@JsonProperty("min_idle_instances")
private Long minIdleInstances;
+ /**
+ * For Fleet-pool V2, this object contains the information about the alternate node type ids to
+ * use when attempting to launch a cluster if the node type id is not available.
+ */
+ @JsonProperty("node_type_flexibility")
+ private NodeTypeFlexibility nodeTypeFlexibility;
+
/**
* This field encodes, through a single value, the resources available to each of the Spark nodes
* in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -243,6 +250,15 @@ public Long getMinIdleInstances() {
return minIdleInstances;
}
+ public GetInstancePool setNodeTypeFlexibility(NodeTypeFlexibility nodeTypeFlexibility) {
+ this.nodeTypeFlexibility = nodeTypeFlexibility;
+ return this;
+ }
+
+ public NodeTypeFlexibility getNodeTypeFlexibility() {
+ return nodeTypeFlexibility;
+ }
+
public GetInstancePool setNodeTypeId(String nodeTypeId) {
this.nodeTypeId = nodeTypeId;
return this;
@@ -315,6 +331,7 @@ public boolean equals(Object o) {
&& Objects.equals(instancePoolName, that.instancePoolName)
&& Objects.equals(maxCapacity, that.maxCapacity)
&& Objects.equals(minIdleInstances, that.minIdleInstances)
+ && Objects.equals(nodeTypeFlexibility, that.nodeTypeFlexibility)
&& Objects.equals(nodeTypeId, that.nodeTypeId)
&& Objects.equals(preloadedDockerImages, that.preloadedDockerImages)
&& Objects.equals(preloadedSparkVersions, that.preloadedSparkVersions)
@@ -338,6 +355,7 @@ public int hashCode() {
instancePoolName,
maxCapacity,
minIdleInstances,
+ nodeTypeFlexibility,
nodeTypeId,
preloadedDockerImages,
preloadedSparkVersions,
@@ -361,6 +379,7 @@ public String toString() {
.add("instancePoolName", instancePoolName)
.add("maxCapacity", maxCapacity)
.add("minIdleInstances", minIdleInstances)
+ .add("nodeTypeFlexibility", nodeTypeFlexibility)
.add("nodeTypeId", nodeTypeId)
.add("preloadedDockerImages", preloadedDockerImages)
.add("preloadedSparkVersions", preloadedSparkVersions)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAndStats.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAndStats.java
index f2fd58676..849c15c52 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAndStats.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/InstancePoolAndStats.java
@@ -101,6 +101,13 @@ public class InstancePoolAndStats {
@JsonProperty("min_idle_instances")
private Long minIdleInstances;
+ /**
+ * For Fleet-pool V2, this object contains the information about the alternate node type ids to
+ * use when attempting to launch a cluster if the node type id is not available.
+ */
+ @JsonProperty("node_type_flexibility")
+ private NodeTypeFlexibility nodeTypeFlexibility;
+
/**
* This field encodes, through a single value, the resources available to each of the Spark nodes
* in this cluster. For example, the Spark nodes can be provisioned and optimized for memory or
@@ -243,6 +250,15 @@ public Long getMinIdleInstances() {
return minIdleInstances;
}
+ public InstancePoolAndStats setNodeTypeFlexibility(NodeTypeFlexibility nodeTypeFlexibility) {
+ this.nodeTypeFlexibility = nodeTypeFlexibility;
+ return this;
+ }
+
+ public NodeTypeFlexibility getNodeTypeFlexibility() {
+ return nodeTypeFlexibility;
+ }
+
public InstancePoolAndStats setNodeTypeId(String nodeTypeId) {
this.nodeTypeId = nodeTypeId;
return this;
@@ -316,6 +332,7 @@ public boolean equals(Object o) {
&& Objects.equals(instancePoolName, that.instancePoolName)
&& Objects.equals(maxCapacity, that.maxCapacity)
&& Objects.equals(minIdleInstances, that.minIdleInstances)
+ && Objects.equals(nodeTypeFlexibility, that.nodeTypeFlexibility)
&& Objects.equals(nodeTypeId, that.nodeTypeId)
&& Objects.equals(preloadedDockerImages, that.preloadedDockerImages)
&& Objects.equals(preloadedSparkVersions, that.preloadedSparkVersions)
@@ -339,6 +356,7 @@ public int hashCode() {
instancePoolName,
maxCapacity,
minIdleInstances,
+ nodeTypeFlexibility,
nodeTypeId,
preloadedDockerImages,
preloadedSparkVersions,
@@ -362,6 +380,7 @@ public String toString() {
.add("instancePoolName", instancePoolName)
.add("maxCapacity", maxCapacity)
.add("minIdleInstances", minIdleInstances)
+ .add("nodeTypeFlexibility", nodeTypeFlexibility)
.add("nodeTypeId", nodeTypeId)
.add("preloadedDockerImages", preloadedDockerImages)
.add("preloadedSparkVersions", preloadedSparkVersions)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/NodeTypeFlexibility.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/NodeTypeFlexibility.java
new file mode 100755
index 000000000..7366ed43d
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/compute/NodeTypeFlexibility.java
@@ -0,0 +1,33 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.compute;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import java.util.Objects;
+
+/**
+ * For Fleet-V2 using classic clusters, this object contains the information about the alternate
+ * node type ids to use when attempting to launch a cluster. It can be used with both the driver and
+ * worker node types.
+ */
+@Generated
+public class NodeTypeFlexibility {
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ return true;
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash();
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(NodeTypeFlexibility.class).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieAPI.java
index 93b3e1c93..49a53192a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieAPI.java
@@ -174,11 +174,11 @@ public GenieGenerateDownloadFullQueryResultResponse generateDownloadFullQueryRes
/**
* Generate full query result download.
*
- * <p>Initiate full SQL query result download and obtain a `download_id` to track the download
- * progress. This call initiates a new SQL execution to generate the query result. The result is
- * stored in an external link can be retrieved using the [Get Download Full Query
- * Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks strongly recommends
- * that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. See [Execute
+ * <p>Initiates a new SQL execution and returns a `download_id` that you can use to track the
+ * progress of the download. The query result is stored in an external link and can be retrieved
+ * using the [Get Download Full Query Result](:method:genie/getdownloadfullqueryresult) API.
+ * Warning: Databricks strongly recommends that you protect the URLs that are returned by the
+ * `EXTERNAL_LINKS` disposition. See [Execute
* Statement](:method:statementexecution/executestatement) for more details.
*/
public GenieGenerateDownloadFullQueryResultResponse generateDownloadFullQueryResult(
@@ -205,15 +205,13 @@ public GenieGetDownloadFullQueryResultResponse getDownloadFullQueryResult(
* Get download full query result.
*
* <p>After [Generating a Full Query Result Download](:method:genie/getdownloadfullqueryresult)
- * and successfully receiving a `download_id`, use this API to Poll download progress and retrieve
- * the SQL query result external link(s) upon completion. Warning: Databricks strongly recommends
- * that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. When you use
- * the `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, which can be used
- * to download the results directly from Amazon S3. As a short-lived access credential is embedded
- * in this presigned URL, you should protect the URL. Because presigned URLs are already generated
- * with embedded temporary access credentials, you must not set an Authorization header in the
- * download requests. See [Execute Statement](:method:statementexecution/executestatement) for
- * more details.
+ * and successfully receiving a `download_id`, use this API to poll the download progress. When
+ * the download is complete, the API returns one or more external links to the query result files.
+ * Warning: Databricks strongly recommends that you protect the URLs that are returned by the
+ * `EXTERNAL_LINKS` disposition. You must not set an Authorization header in download requests.
+ * When using the `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant
+ * temporary access to data. See [Execute Statement](:method:statementexecution/executestatement)
+ * for more details.
*/
public GenieGetDownloadFullQueryResultResponse getDownloadFullQueryResult(
GenieGetDownloadFullQueryResultRequest request) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGenerateDownloadFullQueryResultRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGenerateDownloadFullQueryResultRequest.java
index b1c8d9a74..a4a38dadf 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGenerateDownloadFullQueryResultRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGenerateDownloadFullQueryResultRequest.java
@@ -19,7 +19,7 @@ public class GenieGenerateDownloadFullQueryResultRequest {
/** Message ID */
@JsonIgnore private String messageId;
- /** Space ID */
+ /** Genie space ID */
@JsonIgnore private String spaceId;
public GenieGenerateDownloadFullQueryResultRequest setAttachmentId(String attachmentId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGetDownloadFullQueryResultRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGetDownloadFullQueryResultRequest.java
index 9a3024bd7..c3f1cf2e4 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGetDownloadFullQueryResultRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieGetDownloadFullQueryResultRequest.java
@@ -25,7 +25,7 @@ public class GenieGetDownloadFullQueryResultRequest {
/** Message ID */
@JsonIgnore private String messageId;
- /** Space ID */
+ /** Genie space ID */
@JsonIgnore private String spaceId;
public GenieGetDownloadFullQueryResultRequest setAttachmentId(String attachmentId) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieService.java
index a1063b051..b47db20e0 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieService.java
@@ -44,11 +44,11 @@ GenieGetMessageQueryResultResponse executeMessageQuery(
/**
* Generate full query result download.
*
- * <p>Initiate full SQL query result download and obtain a `download_id` to track the download
- * progress. This call initiates a new SQL execution to generate the query result. The result is
- * stored in an external link can be retrieved using the [Get Download Full Query
- * Result](:method:genie/getdownloadfullqueryresult) API. Warning: Databricks strongly recommends
- * that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. See [Execute
+ * <p>Initiates a new SQL execution and returns a `download_id` that you can use to track the
+ * progress of the download. The query result is stored in an external link and can be retrieved
+ * using the [Get Download Full Query Result](:method:genie/getdownloadfullqueryresult) API.
+ * Warning: Databricks strongly recommends that you protect the URLs that are returned by the
+ * `EXTERNAL_LINKS` disposition. See [Execute
* Statement](:method:statementexecution/executestatement) for more details.
*/
GenieGenerateDownloadFullQueryResultResponse generateDownloadFullQueryResult(
@@ -58,15 +58,13 @@ GenieGenerateDownloadFullQueryResultResponse generateDownloadFullQueryResult(
* Get download full query result.
*
* <p>After [Generating a Full Query Result Download](:method:genie/getdownloadfullqueryresult)
- * and successfully receiving a `download_id`, use this API to Poll download progress and retrieve
- * the SQL query result external link(s) upon completion. Warning: Databricks strongly recommends
- * that you protect the URLs that are returned by the `EXTERNAL_LINKS` disposition. When you use
- * the `EXTERNAL_LINKS` disposition, a short-lived, presigned URL is generated, which can be used
- * to download the results directly from Amazon S3. As a short-lived access credential is embedded
- * in this presigned URL, you should protect the URL. Because presigned URLs are already generated
- * with embedded temporary access credentials, you must not set an Authorization header in the
- * download requests. See [Execute Statement](:method:statementexecution/executestatement) for
- * more details.
+ * and successfully receiving a `download_id`, use this API to poll the download progress. When
+ * the download is complete, the API returns one or more external links to the query result files.
+ * Warning: Databricks strongly recommends that you protect the URLs that are returned by the
+ * `EXTERNAL_LINKS` disposition. You must not set an Authorization header in download requests.
+ * When using the `EXTERNAL_LINKS` disposition, Databricks returns presigned URLs that grant
+ * temporary access to data. See [Execute Statement](:method:statementexecution/executestatement)
+ * for more details.
*/
GenieGetDownloadFullQueryResultResponse getDownloadFullQueryResult(
GenieGetDownloadFullQueryResultRequest genieGetDownloadFullQueryResultRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieSpace.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieSpace.java
index 1be583ef7..4ac20ce24 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieSpace.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/dashboards/GenieSpace.java
@@ -13,7 +13,7 @@ public class GenieSpace {
@JsonProperty("description")
private String description;
- /** Space ID */
+ /** Genie space ID */
@JsonProperty("space_id")
private String spaceId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java
index 5369e357c..ede9e9aa1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/CleanRoomTaskRunState.java
@@ -12,14 +12,14 @@
public class CleanRoomTaskRunState {
/**
* A value indicating the run's current lifecycle state. This field is always available in the
- * response.
+ * response. Note: Additional states might be introduced in future releases.
*/
@JsonProperty("life_cycle_state")
private CleanRoomTaskRunLifeCycleState lifeCycleState;
/**
* A value indicating the run's result. This field is only available for terminal lifecycle
- * states.
+ * states. Note: Additional states might be introduced in future releases.
*/
@JsonProperty("result_state")
private CleanRoomTaskRunResultState resultState;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/DashboardTask.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/DashboardTask.java
index 897c1c11a..9037bdd49 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/DashboardTask.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/DashboardTask.java
@@ -10,15 +10,18 @@
/** Configures the Lakeview Dashboard job task type. */
@Generated
public class DashboardTask {
- /** */
+ /** The identifier of the dashboard to refresh. */
@JsonProperty("dashboard_id")
private String dashboardId;
- /** */
+ /** Optional: subscription configuration for sending the dashboard snapshot. */
@JsonProperty("subscription")
private Subscription subscription;
- /** The warehouse id to execute the dashboard with for the schedule */
+ /**
+ * Optional: The warehouse id to execute the dashboard with for the schedule. If not specified,
+ * the default warehouse of the dashboard will be used.
+ */
@JsonProperty("warehouse_id")
private String warehouseId;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairHistoryItem.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairHistoryItem.java
index 15e4d4ffa..24a8b911d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairHistoryItem.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairHistoryItem.java
@@ -10,6 +10,18 @@
@Generated
public class RepairHistoryItem {
+ /**
+ * The actual performance target used by the serverless run during execution. This can differ from
+ * the client-set performance target on the request depending on whether the performance mode is
+ * supported by the job type.
+ *
+ * * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+ * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+ * optimized cluster performance.
+ */
+ @JsonProperty("effective_performance_target")
+ private PerformanceTarget effectivePerformanceTarget;
+
/** The end time of the (repaired) run. */
@JsonProperty("end_time")
private Long endTime;
@@ -40,6 +52,16 @@ public class RepairHistoryItem {
@JsonProperty("type")
private RepairHistoryItemType typeValue;
+ public RepairHistoryItem setEffectivePerformanceTarget(
+ PerformanceTarget effectivePerformanceTarget) {
+ this.effectivePerformanceTarget = effectivePerformanceTarget;
+ return this;
+ }
+
+ public PerformanceTarget getEffectivePerformanceTarget() {
+ return effectivePerformanceTarget;
+ }
+
public RepairHistoryItem setEndTime(Long endTime) {
this.endTime = endTime;
return this;
@@ -108,7 +130,8 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
RepairHistoryItem that = (RepairHistoryItem) o;
- return Objects.equals(endTime, that.endTime)
+ return Objects.equals(effectivePerformanceTarget, that.effectivePerformanceTarget)
+ && Objects.equals(endTime, that.endTime)
&& Objects.equals(id, that.id)
&& Objects.equals(startTime, that.startTime)
&& Objects.equals(state, that.state)
@@ -119,12 +142,14 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
- return Objects.hash(endTime, id, startTime, state, status, taskRunIds, typeValue);
+ return Objects.hash(
+ effectivePerformanceTarget, endTime, id, startTime, state, status, taskRunIds, typeValue);
}
@Override
public String toString() {
return new ToStringer(RepairHistoryItem.class)
+ .add("effectivePerformanceTarget", effectivePerformanceTarget)
.add("endTime", endTime)
.add("id", id)
.add("startTime", startTime)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairRun.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairRun.java
index 7fb9ace32..450831d23 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairRun.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/RepairRun.java
@@ -63,6 +63,18 @@ public class RepairRun {
@JsonProperty("notebook_params")
private Map<String, String> notebookParams;

+  /**
+   * The performance mode on a serverless job. The performance target determines the level of
+   * compute performance or cost-efficiency for the run.
+   *
+   * * `STANDARD`: Enables cost-efficient execution of serverless workloads. *
+ * `PERFORMANCE_OPTIMIZED`: Prioritizes fast startup and execution times through rapid scaling and
+ * optimized cluster performance.
+ */
+ @JsonProperty("performance_target")
+ private PerformanceTarget performanceTarget;
+
/** Controls whether the pipeline should perform a full refresh */
@JsonProperty("pipeline_params")
private PipelineParams pipelineParams;
@@ -184,6 +196,15 @@ public Map [Link]:
* https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now
@@ -55,6 +55,7 @@ public enum TerminationCodeCode {
// exceeded the
// allotted rate limit. Consider spreading the run execution over a larger time
// frame.
+ DISABLED, // The run was never executed because it was disabled explicitly by the user.
DRIVER_ERROR, // The run encountered an error while communicating with the Spark Driver.
FEATURE_DISABLED, // The run failed because it tried to access a feature unavailable for the
// workspace.
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TerminationDetails.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TerminationDetails.java
index ec0aa3fbd..f3db81235 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TerminationDetails.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/jobs/TerminationDetails.java
@@ -43,7 +43,7 @@ public class TerminationDetails {
* to the state message for further details. * `CLOUD_FAILURE`: The run failed due to a cloud
* provider issue. Refer to the state message for further details. *
* `MAX_JOB_QUEUE_SIZE_EXCEEDED`: The run was skipped due to reaching the job level queue size
- * limit.
+ * limit. * `DISABLED`: The run was never executed because it was disabled explicitly by the user.
*
* [Link]:
* https://kb.databricks.com/en_US/notebooks/too-many-execution-contexts-are-open-right-now
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ComplianceStandard.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ComplianceStandard.java
index cd86c2704..66a0a7d58 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ComplianceStandard.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/ComplianceStandard.java
@@ -17,6 +17,7 @@ public enum ComplianceStandard {
IRAP_PROTECTED,
ISMAP,
ITAR_EAR,
+ K_FSI,
NONE,
PCI_DSS,
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookAPI.java
index 09fb0fb49..c31acf4df 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookAPI.java
@@ -7,7 +7,7 @@
import org.slf4j.LoggerFactory;
/**
- * Controls whether users can export notebooks and files from the Workspace. By default, this
+ * Controls whether users can export notebooks and files from the Workspace UI. By default, this
* setting is enabled.
*/
@Generated
@@ -27,9 +27,9 @@ public EnableExportNotebookAPI(EnableExportNotebookService mock) {
}
/**
- * Get the Enable Export Notebook setting.
+ * Get the Notebook and File exporting setting.
*
- * Gets the Enable Export Notebook setting.
+ * Gets the Notebook and File exporting setting.
*/
public EnableExportNotebook getEnableExportNotebook() {
return impl.getEnableExportNotebook();
@@ -45,10 +45,10 @@ public EnableExportNotebook patchEnableExportNotebook(
}
/**
- * Update the Enable Export Notebook setting.
+ * Update the Notebook and File exporting setting.
*
- * Updates the Enable Export Notebook setting. The model follows eventual consistency, which
- * means the get after the update operation might receive stale values for some time.
+ * Updates the Notebook and File exporting setting. The model follows eventual consistency,
+ * which means the get after the update operation might receive stale values for some time.
*/
public EnableExportNotebook patchEnableExportNotebook(UpdateEnableExportNotebookRequest request) {
return impl.patchEnableExportNotebook(request);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookService.java
index 389a99032..0110669a1 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableExportNotebookService.java
@@ -4,7 +4,7 @@
import com.databricks.sdk.support.Generated;
/**
- * Controls whether users can export notebooks and files from the Workspace. By default, this
+ * Controls whether users can export notebooks and files from the Workspace UI. By default, this
* setting is enabled.
*
* This is the high-level interface, that contains generated methods.
@@ -14,17 +14,17 @@
@Generated
public interface EnableExportNotebookService {
/**
- * Get the Enable Export Notebook setting.
+ * Get the Notebook and File exporting setting.
*
- * Gets the Enable Export Notebook setting.
+ * Gets the Notebook and File exporting setting.
*/
EnableExportNotebook getEnableExportNotebook();
/**
- * Update the Enable Export Notebook setting.
+ * Update the Notebook and File exporting setting.
*
- * Updates the Enable Export Notebook setting. The model follows eventual consistency, which
- * means the get after the update operation might receive stale values for some time.
+ * Updates the Notebook and File exporting setting. The model follows eventual consistency,
+ * which means the get after the update operation might receive stale values for some time.
*/
EnableExportNotebook patchEnableExportNotebook(
UpdateEnableExportNotebookRequest updateEnableExportNotebookRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardAPI.java
index 209db42b8..52932e807 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardAPI.java
@@ -27,9 +27,9 @@ public EnableNotebookTableClipboardAPI(EnableNotebookTableClipboardService mock)
}
/**
- * Get the Enable Notebook Table Clipboard setting.
+ * Get the Results Table Clipboard features setting.
*
- * Gets the Enable Notebook Table Clipboard setting.
+ * Gets the Results Table Clipboard features setting.
*/
public EnableNotebookTableClipboard getEnableNotebookTableClipboard() {
return impl.getEnableNotebookTableClipboard();
@@ -45,10 +45,11 @@ public EnableNotebookTableClipboard patchEnableNotebookTableClipboard(
}
/**
- * Update the Enable Notebook Table Clipboard setting.
+ * Update the Results Table Clipboard features setting.
*
- * Updates the Enable Notebook Table Clipboard setting. The model follows eventual consistency,
- * which means the get after the update operation might receive stale values for some time.
+ * Updates the Results Table Clipboard features setting. The model follows eventual
+ * consistency, which means the get after the update operation might receive stale values for some
+ * time.
*/
public EnableNotebookTableClipboard patchEnableNotebookTableClipboard(
UpdateEnableNotebookTableClipboardRequest request) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardService.java
index 73bdd8051..84a8080c4 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableNotebookTableClipboardService.java
@@ -14,17 +14,18 @@
@Generated
public interface EnableNotebookTableClipboardService {
/**
- * Get the Enable Notebook Table Clipboard setting.
+ * Get the Results Table Clipboard features setting.
*
- * Gets the Enable Notebook Table Clipboard setting.
+ * Gets the Results Table Clipboard features setting.
*/
EnableNotebookTableClipboard getEnableNotebookTableClipboard();
/**
- * Update the Enable Notebook Table Clipboard setting.
+ * Update the Results Table Clipboard features setting.
*
- * Updates the Enable Notebook Table Clipboard setting. The model follows eventual consistency,
- * which means the get after the update operation might receive stale values for some time.
+ * Updates the Results Table Clipboard features setting. The model follows eventual
+ * consistency, which means the get after the update operation might receive stale values for some
+ * time.
*/
EnableNotebookTableClipboard patchEnableNotebookTableClipboard(
UpdateEnableNotebookTableClipboardRequest updateEnableNotebookTableClipboardRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingAPI.java
index cf27e858d..87556c6f8 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingAPI.java
@@ -24,9 +24,9 @@ public EnableResultsDownloadingAPI(EnableResultsDownloadingService mock) {
}
/**
- * Get the Enable Results Downloading setting.
+ * Get the Notebook results download setting.
*
- * Gets the Enable Results Downloading setting.
+ * Gets the Notebook results download setting.
*/
public EnableResultsDownloading getEnableResultsDownloading() {
return impl.getEnableResultsDownloading();
@@ -42,10 +42,10 @@ public EnableResultsDownloading patchEnableResultsDownloading(
}
/**
- * Update the Enable Results Downloading setting.
+ * Update the Notebook results download setting.
*
- * Updates the Enable Results Downloading setting. The model follows eventual consistency,
- * which means the get after the update operation might receive stale values for some time.
+ * Updates the Notebook results download setting. The model follows eventual consistency, which
+ * means the get after the update operation might receive stale values for some time.
*/
public EnableResultsDownloading patchEnableResultsDownloading(
UpdateEnableResultsDownloadingRequest request) {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingService.java
index 7cf41fdaf..55b16c93b 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/EnableResultsDownloadingService.java
@@ -13,17 +13,17 @@
@Generated
public interface EnableResultsDownloadingService {
/**
- * Get the Enable Results Downloading setting.
+ * Get the Notebook results download setting.
*
- * Gets the Enable Results Downloading setting.
+ * Gets the Notebook results download setting.
*/
EnableResultsDownloading getEnableResultsDownloading();
/**
- * Update the Enable Results Downloading setting.
+ * Update the Notebook results download setting.
*
- * Updates the Enable Results Downloading setting. The model follows eventual consistency,
- * which means the get after the update operation might receive stale values for some time.
+ * Updates the Notebook results download setting. The model follows eventual consistency, which
+ * means the get after the update operation might receive stale values for some time.
*/
EnableResultsDownloading patchEnableResultsDownloading(
UpdateEnableResultsDownloadingRequest updateEnableResultsDownloadingRequest);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java
index 9d67e7a5c..81648bdf3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/settings/SettingsAPI.java
@@ -116,7 +116,7 @@ public DisableLegacyDbfsAPI DisableLegacyDbfs() {
return disableLegacyDbfsAPI;
}
- /** Controls whether users can export notebooks and files from the Workspace. */
+ /** Controls whether users can export notebooks and files from the Workspace UI. */
public EnableExportNotebookAPI EnableExportNotebook() {
return enableExportNotebookAPI;
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateEndpoint.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateEndpoint.java
index 5e6ab7a73..433feb92d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateEndpoint.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateEndpoint.java
@@ -9,14 +9,27 @@
@Generated
public class CreateEndpoint {
- /** Type of endpoint. */
+ /** The budget policy id to be applied */
+ @JsonProperty("budget_policy_id")
+ private String budgetPolicyId;
+
+ /** Type of endpoint */
@JsonProperty("endpoint_type")
private EndpointType endpointType;
- /** Name of endpoint */
+ /** Name of the vector search endpoint */
@JsonProperty("name")
private String name;
+ public CreateEndpoint setBudgetPolicyId(String budgetPolicyId) {
+ this.budgetPolicyId = budgetPolicyId;
+ return this;
+ }
+
+ public String getBudgetPolicyId() {
+ return budgetPolicyId;
+ }
+
public CreateEndpoint setEndpointType(EndpointType endpointType) {
this.endpointType = endpointType;
return this;
@@ -40,17 +53,20 @@ public boolean equals(Object o) {
if (this == o) return true;
if (o == null || getClass() != o.getClass()) return false;
CreateEndpoint that = (CreateEndpoint) o;
- return Objects.equals(endpointType, that.endpointType) && Objects.equals(name, that.name);
+ return Objects.equals(budgetPolicyId, that.budgetPolicyId)
+ && Objects.equals(endpointType, that.endpointType)
+ && Objects.equals(name, that.name);
}
@Override
public int hashCode() {
- return Objects.hash(endpointType, name);
+ return Objects.hash(budgetPolicyId, endpointType, name);
}
@Override
public String toString() {
return new ToStringer(CreateEndpoint.class)
+ .add("budgetPolicyId", budgetPolicyId)
.add("endpointType", endpointType)
.add("name", name)
.toString();
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexRequest.java
index 8d55800d5..0856cbced 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexRequest.java
@@ -22,12 +22,11 @@ public class CreateVectorIndexRequest {
private String endpointName;
/**
- * There are 2 types of Vector Search indexes:
- *
- * - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically
- * and incrementally updating the index as the underlying data in the Delta Table changes. -
- * `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through
- * our REST and SDK APIs. With this model, the user manages index updates.
+ * There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs
+ * with a source Delta Table, automatically and incrementally updating the index as the underlying
+ * data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and
+ * write of vectors and metadata through our REST and SDK APIs. With this model, the user manages
+ * index updates.
*/
@JsonProperty("index_type")
private VectorIndexType indexType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexResponse.java
deleted file mode 100755
index cc483c43c..000000000
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CreateVectorIndexResponse.java
+++ /dev/null
@@ -1,44 +0,0 @@
-// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
-
-package com.databricks.sdk.service.vectorsearch;
-
-import com.databricks.sdk.support.Generated;
-import com.databricks.sdk.support.ToStringer;
-import com.fasterxml.jackson.annotation.JsonProperty;
-import java.util.Objects;
-
-@Generated
-public class CreateVectorIndexResponse {
- /** */
- @JsonProperty("vector_index")
- private VectorIndex vectorIndex;
-
- public CreateVectorIndexResponse setVectorIndex(VectorIndex vectorIndex) {
- this.vectorIndex = vectorIndex;
- return this;
- }
-
- public VectorIndex getVectorIndex() {
- return vectorIndex;
- }
-
- @Override
- public boolean equals(Object o) {
- if (this == o) return true;
- if (o == null || getClass() != o.getClass()) return false;
- CreateVectorIndexResponse that = (CreateVectorIndexResponse) o;
- return Objects.equals(vectorIndex, that.vectorIndex);
- }
-
- @Override
- public int hashCode() {
- return Objects.hash(vectorIndex);
- }
-
- @Override
- public String toString() {
- return new ToStringer(CreateVectorIndexResponse.class)
- .add("vectorIndex", vectorIndex)
- .toString();
- }
-}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CustomTag.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CustomTag.java
new file mode 100755
index 000000000..1736987e3
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/CustomTag.java
@@ -0,0 +1,55 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.vectorsearch;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class CustomTag {
+ /** Key field for a vector search endpoint tag. */
+ @JsonProperty("key")
+ private String key;
+
+ /** [Optional] Value field for a vector search endpoint tag. */
+ @JsonProperty("value")
+ private String value;
+
+ public CustomTag setKey(String key) {
+ this.key = key;
+ return this;
+ }
+
+ public String getKey() {
+ return key;
+ }
+
+ public CustomTag setValue(String value) {
+ this.value = value;
+ return this;
+ }
+
+ public String getValue() {
+ return value;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ CustomTag that = (CustomTag) o;
+ return Objects.equals(key, that.key) && Objects.equals(value, that.value);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(key, value);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(CustomTag.class).add("key", key).add("value", value).toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataResult.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataResult.java
index 00df1c370..3173fd54a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataResult.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataResult.java
@@ -8,7 +8,6 @@
import java.util.Collection;
import java.util.Objects;
-/** Result of the upsert or delete operation. */
@Generated
public class DeleteDataResult {
/** List of primary keys for rows that failed to process. */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataStatus.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataStatus.java
index dcde6f693..d28dd646a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataStatus.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataStatus.java
@@ -4,7 +4,6 @@
import com.databricks.sdk.support.Generated;
-/** Status of the delete operation. */
@Generated
public enum DeleteDataStatus {
FAILURE,
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataVectorIndexRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataVectorIndexRequest.java
index e06f0ab1d..aa5783a57 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataVectorIndexRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeleteDataVectorIndexRequest.java
@@ -3,20 +3,21 @@
package com.databricks.sdk.service.vectorsearch;
import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.QueryParam;
import com.databricks.sdk.support.ToStringer;
import com.fasterxml.jackson.annotation.JsonIgnore;
-import com.fasterxml.jackson.annotation.JsonProperty;
import java.util.Collection;
import java.util.Objects;
-/** Request payload for deleting data from a vector index. */
+/** Delete data from index */
@Generated
public class DeleteDataVectorIndexRequest {
/** Name of the vector index where data is to be deleted. Must be a Direct Vector Access Index. */
@JsonIgnore private String indexName;
/** List of primary keys for the data to be deleted. */
- @JsonProperty("primary_keys")
+ @JsonIgnore
+ @QueryParam("primary_keys")
private Collection - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops
- * processing after successfully refreshing the source table in the pipeline once, ensuring the
- * table is updated based on the data available when the update started. - `CONTINUOUS`: If the
- * pipeline uses continuous execution, the pipeline processes new data as it arrives in the source
- * table to keep vector index fresh.
+ * Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the
+ * system stops processing after successfully refreshing the source table in the pipeline once,
+ * ensuring the table is updated based on the data available when the update started. -
+ * `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it
+ * arrives in the source table to keep vector index fresh.
*/
@JsonProperty("pipeline_type")
private PipelineType pipelineType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeltaSyncVectorIndexSpecResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeltaSyncVectorIndexSpecResponse.java
index 5c0153c98..9f2f17700 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeltaSyncVectorIndexSpecResponse.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DeltaSyncVectorIndexSpecResponse.java
@@ -30,13 +30,11 @@ public class DeltaSyncVectorIndexSpecResponse {
private String pipelineId;
/**
- * Pipeline execution mode.
- *
- * - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops
- * processing after successfully refreshing the source table in the pipeline once, ensuring the
- * table is updated based on the data available when the update started. - `CONTINUOUS`: If the
- * pipeline uses continuous execution, the pipeline processes new data as it arrives in the source
- * table to keep vector index fresh.
+ * Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the
+ * system stops processing after successfully refreshing the source table in the pipeline once,
+ * ensuring the table is updated based on the data available when the update started. -
+ * `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it
+ * arrives in the source table to keep vector index fresh.
*/
@JsonProperty("pipeline_type")
private PipelineType pipelineType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DirectAccessVectorIndexSpec.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DirectAccessVectorIndexSpec.java
index 406639195..579c22ce3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DirectAccessVectorIndexSpec.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/DirectAccessVectorIndexSpec.java
@@ -10,21 +10,18 @@
@Generated
public class DirectAccessVectorIndexSpec {
- /** Contains the optional model endpoint to use during query time. */
+ /** The columns that contain the embedding source. The format should be array[double]. */
@JsonProperty("embedding_source_columns")
private Collection Supported types are `integer`, `long`, `float`, `double`, `boolean`, `string`, `date`,
- * `timestamp`.
- *
- * Supported types for vector column: `array The JSON representation for `ListValue` is JSON array.
+ */
@Generated
public class ListValue {
- /** */
+ /** Repeated field of dynamically typed values. */
@JsonProperty("values")
private Collection - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically
- * and incrementally updating the index as the underlying data in the Delta Table changes. -
- * `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through
- * our REST and SDK APIs. With this model, the user manages index updates.
+ * There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs
+ * with a source Delta Table, automatically and incrementally updating the index as the underlying
+ * data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and
+ * write of vectors and metadata through our REST and SDK APIs. With this model, the user manages
+ * index updates.
*/
@JsonProperty("index_type")
private VectorIndexType indexType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyRequest.java
new file mode 100755
index 000000000..c1d571df8
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyRequest.java
@@ -0,0 +1,59 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.vectorsearch;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class PatchEndpointBudgetPolicyRequest {
+ /** The budget policy id to be applied */
+ @JsonProperty("budget_policy_id")
+ private String budgetPolicyId;
+
+ /** Name of the vector search endpoint */
+ @JsonIgnore private String endpointName;
+
+ public PatchEndpointBudgetPolicyRequest setBudgetPolicyId(String budgetPolicyId) {
+ this.budgetPolicyId = budgetPolicyId;
+ return this;
+ }
+
+ public String getBudgetPolicyId() {
+ return budgetPolicyId;
+ }
+
+ public PatchEndpointBudgetPolicyRequest setEndpointName(String endpointName) {
+ this.endpointName = endpointName;
+ return this;
+ }
+
+ public String getEndpointName() {
+ return endpointName;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ PatchEndpointBudgetPolicyRequest that = (PatchEndpointBudgetPolicyRequest) o;
+ return Objects.equals(budgetPolicyId, that.budgetPolicyId)
+ && Objects.equals(endpointName, that.endpointName);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(budgetPolicyId, endpointName);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(PatchEndpointBudgetPolicyRequest.class)
+ .add("budgetPolicyId", budgetPolicyId)
+ .add("endpointName", endpointName)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyResponse.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyResponse.java
new file mode 100755
index 000000000..2a0e6337b
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PatchEndpointBudgetPolicyResponse.java
@@ -0,0 +1,45 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.vectorsearch;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Objects;
+
+@Generated
+public class PatchEndpointBudgetPolicyResponse {
+ /** The budget policy applied to the vector search endpoint. */
+ @JsonProperty("effective_budget_policy_id")
+ private String effectiveBudgetPolicyId;
+
+ public PatchEndpointBudgetPolicyResponse setEffectiveBudgetPolicyId(
+ String effectiveBudgetPolicyId) {
+ this.effectiveBudgetPolicyId = effectiveBudgetPolicyId;
+ return this;
+ }
+
+ public String getEffectiveBudgetPolicyId() {
+ return effectiveBudgetPolicyId;
+ }
+
+ @Override
+ public boolean equals(Object o) {
+ if (this == o) return true;
+ if (o == null || getClass() != o.getClass()) return false;
+ PatchEndpointBudgetPolicyResponse that = (PatchEndpointBudgetPolicyResponse) o;
+ return Objects.equals(effectiveBudgetPolicyId, that.effectiveBudgetPolicyId);
+ }
+
+ @Override
+ public int hashCode() {
+ return Objects.hash(effectiveBudgetPolicyId);
+ }
+
+ @Override
+ public String toString() {
+ return new ToStringer(PatchEndpointBudgetPolicyResponse.class)
+ .add("effectiveBudgetPolicyId", effectiveBudgetPolicyId)
+ .toString();
+ }
+}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PipelineType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PipelineType.java
index bef62c430..d3025ed9f 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PipelineType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/PipelineType.java
@@ -5,13 +5,11 @@
import com.databricks.sdk.support.Generated;
/**
- * Pipeline execution mode.
- *
- * - `TRIGGERED`: If the pipeline uses the triggered execution mode, the system stops processing
- * after successfully refreshing the source table in the pipeline once, ensuring the table is
- * updated based on the data available when the update started. - `CONTINUOUS`: If the pipeline uses
- * continuous execution, the pipeline processes new data as it arrives in the source table to keep
- * vector index fresh.
+ * Pipeline execution mode. - `TRIGGERED`: If the pipeline uses the triggered execution mode, the
+ * system stops processing after successfully refreshing the source table in the pipeline once,
+ * ensuring the table is updated based on the data available when the update started. -
+ * `CONTINUOUS`: If the pipeline uses continuous execution, the pipeline processes new data as it
+ * arrives in the source table to keep vector index fresh.
*/
@Generated
public enum PipelineType {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/QueryVectorIndexRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/QueryVectorIndexRequest.java
index dcd707a24..e07f748b7 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/QueryVectorIndexRequest.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/QueryVectorIndexRequest.java
@@ -22,9 +22,11 @@ public class QueryVectorIndexRequest {
/**
* JSON string representing query filters.
*
- * Example filters: - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id
- * greater than 5. - `{"id <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter
- * for id greater than equal to 5. - `{"id": 5}`: Filter for id equal to 5.
+ * Example filters:
+ *
+ * - `{"id <": 5}`: Filter for id less than 5. - `{"id >": 5}`: Filter for id greater than 5. -
+ * `{"id <=": 5}`: Filter for id less than equal to 5. - `{"id >=": 5}`: Filter for id greater
+ * than equal to 5. - `{"id": 5}`: Filter for id equal to 5.
*/
@JsonProperty("filters_json")
private String filtersJson;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ResultData.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ResultData.java
index 76b6bf9ac..abed6988d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ResultData.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/ResultData.java
@@ -13,18 +13,18 @@
public class ResultData {
/** Data rows returned in the query. */
@JsonProperty("data_array")
- private Collection The JSON representation for `Struct` is JSON object.
+ */
@Generated
public class Struct {
/** Data entry, corresponding to a row in a vector index. */
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpdateEndpointCustomTagsRequest.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpdateEndpointCustomTagsRequest.java
new file mode 100755
index 000000000..3a246a96a
--- /dev/null
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/UpdateEndpointCustomTagsRequest.java
@@ -0,0 +1,60 @@
+// Code generated from OpenAPI specs by Databricks SDK Generator. DO NOT EDIT.
+
+package com.databricks.sdk.service.vectorsearch;
+
+import com.databricks.sdk.support.Generated;
+import com.databricks.sdk.support.ToStringer;
+import com.fasterxml.jackson.annotation.JsonIgnore;
+import com.fasterxml.jackson.annotation.JsonProperty;
+import java.util.Collection;
+import java.util.Objects;
+
+@Generated
+public class UpdateEndpointCustomTagsRequest {
+ /** The new custom tags for the vector search endpoint */
+ @JsonProperty("custom_tags")
+ private Collection The JSON representation for `ListValue` is JSON array.
+ */
@JsonProperty("list_value")
private ListValue listValue;
- /** */
- @JsonProperty("null_value")
- private String nullValue;
-
/** */
@JsonProperty("number_value")
private Double numberValue;
@@ -29,7 +31,16 @@ public class Value {
@JsonProperty("string_value")
private String stringValue;
- /** */
+ /**
+ * copied from proto3 / Google Well Known Types, source:
+ * https://github.com/protocolbuffers/protobuf/blob/450d24ca820750c5db5112a6f0b0c2efb9758021/src/google/protobuf/struct.proto
+ * `Struct` represents a structured data value, consisting of fields which map to dynamically
+ * typed values. In some languages, `Struct` might be supported by a native representation. For
+ * example, in scripting languages like JS a struct is represented as an object. The details of
+ * that representation are described together with the proto support for the language.
+ *
+ * The JSON representation for `Struct` is JSON object.
+ */
@JsonProperty("struct_value")
private Struct structValue;
@@ -51,15 +62,6 @@ public ListValue getListValue() {
return listValue;
}
- public Value setNullValue(String nullValue) {
- this.nullValue = nullValue;
- return this;
- }
-
- public String getNullValue() {
- return nullValue;
- }
-
public Value setNumberValue(Double numberValue) {
this.numberValue = numberValue;
return this;
@@ -94,7 +96,6 @@ public boolean equals(Object o) {
Value that = (Value) o;
return Objects.equals(boolValue, that.boolValue)
&& Objects.equals(listValue, that.listValue)
- && Objects.equals(nullValue, that.nullValue)
&& Objects.equals(numberValue, that.numberValue)
&& Objects.equals(stringValue, that.stringValue)
&& Objects.equals(structValue, that.structValue);
@@ -102,7 +103,7 @@ public boolean equals(Object o) {
@Override
public int hashCode() {
- return Objects.hash(boolValue, listValue, nullValue, numberValue, stringValue, structValue);
+ return Objects.hash(boolValue, listValue, numberValue, stringValue, structValue);
}
@Override
@@ -110,7 +111,6 @@ public String toString() {
return new ToStringer(Value.class)
.add("boolValue", boolValue)
.add("listValue", listValue)
- .add("nullValue", nullValue)
.add("numberValue", numberValue)
.add("stringValue", stringValue)
.add("structValue", structValue)
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndex.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndex.java
index 403982239..f5922390d 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndex.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndex.java
@@ -26,12 +26,11 @@ public class VectorIndex {
private String endpointName;
/**
- * There are 2 types of Vector Search indexes:
- *
- * - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically
- * and incrementally updating the index as the underlying data in the Delta Table changes. -
- * `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through
- * our REST and SDK APIs. With this model, the user manages index updates.
+ * There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs
+ * with a source Delta Table, automatically and incrementally updating the index as the underlying
+ * data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and
+ * write of vectors and metadata through our REST and SDK APIs. With this model, the user manages
+ * index updates.
*/
@JsonProperty("index_type")
private VectorIndexType indexType;
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndexType.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndexType.java
index 21367a4ee..20d56ffdf 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndexType.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorIndexType.java
@@ -5,12 +5,11 @@
import com.databricks.sdk.support.Generated;
/**
- * There are 2 types of Vector Search indexes:
- *
- * - `DELTA_SYNC`: An index that automatically syncs with a source Delta Table, automatically and
- * incrementally updating the index as the underlying data in the Delta Table changes. -
- * `DIRECT_ACCESS`: An index that supports direct read and write of vectors and metadata through our
- * REST and SDK APIs. With this model, the user manages index updates.
+ * There are 2 types of Vector Search indexes: - `DELTA_SYNC`: An index that automatically syncs
+ * with a source Delta Table, automatically and incrementally updating the index as the underlying
+ * data in the Delta Table changes. - `DIRECT_ACCESS`: An index that supports direct read and write
+ * of vectors and metadata through our REST and SDK APIs. With this model, the user manages index
+ * updates.
*/
@Generated
public enum VectorIndexType {
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchEndpointsAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchEndpointsAPI.java
index f65ea66c9..fbf782186 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchEndpointsAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchEndpointsAPI.java
@@ -8,6 +8,7 @@
import com.databricks.sdk.support.Wait;
import java.time.Duration;
import java.util.Arrays;
+import java.util.Collection;
import java.util.concurrent.TimeoutException;
import java.util.function.Consumer;
import org.slf4j.Logger;
@@ -100,7 +101,11 @@ public void deleteEndpoint(String endpointName) {
deleteEndpoint(new DeleteEndpointRequest().setEndpointName(endpointName));
}
- /** Delete an endpoint. */
+ /**
+ * Delete an endpoint.
+ *
+ * Delete a vector search endpoint.
+ */
public void deleteEndpoint(DeleteEndpointRequest request) {
impl.deleteEndpoint(request);
}
@@ -109,12 +114,20 @@ public EndpointInfo getEndpoint(String endpointName) {
return getEndpoint(new GetEndpointRequest().setEndpointName(endpointName));
}
- /** Get an endpoint. */
+ /**
+ * Get an endpoint.
+ *
+ * Get details for a single vector search endpoint.
+ */
public EndpointInfo getEndpoint(GetEndpointRequest request) {
return impl.getEndpoint(request);
}
- /** List all endpoints. */
+ /**
+ * List all endpoints.
+ *
+ * List all vector search endpoints in the workspace.
+ */
public Iterable Update the budget policy of an endpoint
+ */
+ public PatchEndpointBudgetPolicyResponse updateEndpointBudgetPolicy(
+ PatchEndpointBudgetPolicyRequest request) {
+ return impl.updateEndpointBudgetPolicy(request);
+ }
+
+ public UpdateEndpointCustomTagsResponse updateEndpointCustomTags(
+ String endpointName, Collection Delete a vector search endpoint.
+ */
void deleteEndpoint(DeleteEndpointRequest deleteEndpointRequest);
- /** Get an endpoint. */
+ /**
+ * Get an endpoint.
+ *
+ * Get details for a single vector search endpoint.
+ */
EndpointInfo getEndpoint(GetEndpointRequest getEndpointRequest);
- /** List all endpoints. */
+ /**
+ * List all endpoints.
+ *
+ * List all vector search endpoints in the workspace.
+ */
ListEndpointResponse listEndpoints(ListEndpointsRequest listEndpointsRequest);
+
+ /**
+ * Update the budget policy of an endpoint.
+ *
+ * Update the budget policy of an endpoint
+ */
+ PatchEndpointBudgetPolicyResponse updateEndpointBudgetPolicy(
+ PatchEndpointBudgetPolicyRequest patchEndpointBudgetPolicyRequest);
+
+ /** Update the custom tags of an endpoint. */
+ UpdateEndpointCustomTagsResponse updateEndpointCustomTags(
+ UpdateEndpointCustomTagsRequest updateEndpointCustomTagsRequest);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesAPI.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesAPI.java
index 6e19bbad6..a7d7e328a 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesAPI.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesAPI.java
@@ -12,9 +12,9 @@
* **Index**: An efficient representation of your embedding vectors that supports real-time and
* efficient approximate nearest neighbor (ANN) search queries.
*
- * There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index that
+ * There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index that
* automatically syncs with a source Delta Table, automatically and incrementally updating the index
- * as the underlying data in the Delta Table changes. * **Direct Vector Access Index**: An index
+ * as the underlying data in the Delta Table changes. - **Direct Vector Access Index**: An index
* that supports direct read and write of vectors and metadata through our REST and SDK APIs. With
* this model, the user manages index updates.
*/
@@ -34,7 +34,7 @@ public VectorSearchIndexesAPI(VectorSearchIndexesService mock) {
impl = mock;
}
- public CreateVectorIndexResponse createIndex(
+ public VectorIndex createIndex(
String name, String endpointName, String primaryKey, VectorIndexType indexType) {
return createIndex(
new CreateVectorIndexRequest()
@@ -49,7 +49,7 @@ public CreateVectorIndexResponse createIndex(
*
* Create a new index.
*/
- public CreateVectorIndexResponse createIndex(CreateVectorIndexRequest request) {
+ public VectorIndex createIndex(CreateVectorIndexRequest request) {
return impl.createIndex(request);
}
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesImpl.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesImpl.java
index b49d5b65f..7933132f3 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesImpl.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesImpl.java
@@ -17,14 +17,14 @@ public VectorSearchIndexesImpl(ApiClient apiClient) {
}
@Override
- public CreateVectorIndexResponse createIndex(CreateVectorIndexRequest request) {
+ public VectorIndex createIndex(CreateVectorIndexRequest request) {
String path = "/api/2.0/vector-search/indexes";
try {
Request req = new Request("POST", path, apiClient.serialize(request));
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
req.withHeader("Content-Type", "application/json");
- return apiClient.execute(req, CreateVectorIndexResponse.class);
+ return apiClient.execute(req, VectorIndex.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
}
@@ -35,10 +35,9 @@ public DeleteDataVectorIndexResponse deleteDataVectorIndex(DeleteDataVectorIndex
String path =
String.format("/api/2.0/vector-search/indexes/%s/delete-data", request.getIndexName());
try {
- Request req = new Request("POST", path, apiClient.serialize(request));
+ Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
req.withHeader("Accept", "application/json");
- req.withHeader("Content-Type", "application/json");
return apiClient.execute(req, DeleteDataVectorIndexResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
@@ -51,6 +50,7 @@ public void deleteIndex(DeleteIndexRequest request) {
try {
Request req = new Request("DELETE", path);
ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
apiClient.execute(req, DeleteIndexResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
@@ -132,6 +132,7 @@ public void syncIndex(SyncIndexRequest request) {
try {
Request req = new Request("POST", path);
ApiClient.setQuery(req, request);
+ req.withHeader("Accept", "application/json");
apiClient.execute(req, SyncIndexResponse.class);
} catch (IOException e) {
throw new DatabricksException("IO error: " + e.getMessage(), e);
diff --git a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesService.java b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesService.java
index c1f1110fe..4f864e450 100755
--- a/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesService.java
+++ b/databricks-sdk-java/src/main/java/com/databricks/sdk/service/vectorsearch/VectorSearchIndexesService.java
@@ -7,9 +7,9 @@
* **Index**: An efficient representation of your embedding vectors that supports real-time and
* efficient approximate nearest neighbor (ANN) search queries.
*
- * There are 2 types of Vector Search indexes: * **Delta Sync Index**: An index that
+ * There are 2 types of Vector Search indexes: - **Delta Sync Index**: An index that
* automatically syncs with a source Delta Table, automatically and incrementally updating the index
- * as the underlying data in the Delta Table changes. * **Direct Vector Access Index**: An index
+ * as the underlying data in the Delta Table changes. - **Direct Vector Access Index**: An index
* that supports direct read and write of vectors and metadata through our REST and SDK APIs. With
* this model, the user manages index updates.
*
@@ -24,7 +24,7 @@ public interface VectorSearchIndexesService {
*
* Create a new index.
*/
- CreateVectorIndexResponse createIndex(CreateVectorIndexRequest createVectorIndexRequest);
+ VectorIndex createIndex(CreateVectorIndexRequest createVectorIndexRequest);
/**
* Delete data from index.