From 59ab9a696320083de7d04b2d0e6cd3499a9ceeec Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Mon, 10 Mar 2025 21:54:11 -0700
Subject: [PATCH 1/2] [SPARK-51463] Add `Spark Connect`-generated `Swift`
 source code

---
 .github/.licenserc.yaml                         |     1 +
 Sources/SparkConnect/base.grpc.swift            |  2437 ++++
 Sources/SparkConnect/base.pb.swift              |  7606 +++++++++++
 Sources/SparkConnect/catalog.grpc.swift         |    26 +
 Sources/SparkConnect/catalog.pb.swift           |  2309 ++++
 Sources/SparkConnect/commands.grpc.swift        |    26 +
 Sources/SparkConnect/commands.pb.swift          |  4588 +++++++
 Sources/SparkConnect/common.grpc.swift          |    26 +
 Sources/SparkConnect/common.pb.swift            |  1115 ++
 Sources/SparkConnect/example_plugins.grpc.swift |    26 +
 Sources/SparkConnect/example_plugins.pb.swift   |   215 +
 Sources/SparkConnect/expressions.grpc.swift     |    26 +
 Sources/SparkConnect/expressions.pb.swift       |  4922 +++++++
 Sources/SparkConnect/ml.grpc.swift              |    26 +
 Sources/SparkConnect/ml.pb.swift                |  1004 ++
 Sources/SparkConnect/ml_common.grpc.swift       |    26 +
 Sources/SparkConnect/ml_common.pb.swift         |   263 +
 Sources/SparkConnect/relations.grpc.swift       |    26 +
 Sources/SparkConnect/relations.pb.swift         | 10559 ++++++++++++++++
 Sources/SparkConnect/types.grpc.swift           |    26 +
 Sources/SparkConnect/types.pb.swift             |  2457 ++++
 21 files changed, 37710 insertions(+)
 create mode 100644 Sources/SparkConnect/base.grpc.swift
 create mode 100644 Sources/SparkConnect/base.pb.swift
 create mode 100644 Sources/SparkConnect/catalog.grpc.swift
 create mode 100644 Sources/SparkConnect/catalog.pb.swift
 create mode 100644 Sources/SparkConnect/commands.grpc.swift
 create mode 100644 Sources/SparkConnect/commands.pb.swift
 create mode 100644 Sources/SparkConnect/common.grpc.swift
 create mode 100644 Sources/SparkConnect/common.pb.swift
 create mode 100644 Sources/SparkConnect/example_plugins.grpc.swift
 create mode 100644 Sources/SparkConnect/example_plugins.pb.swift
 create mode 100644 Sources/SparkConnect/expressions.grpc.swift
 create mode 100644 Sources/SparkConnect/expressions.pb.swift
 create mode 100644 Sources/SparkConnect/ml.grpc.swift
 create mode 100644 Sources/SparkConnect/ml.pb.swift
 create mode 100644 Sources/SparkConnect/ml_common.grpc.swift
 create mode 100644 Sources/SparkConnect/ml_common.pb.swift
 create mode 100644 Sources/SparkConnect/relations.grpc.swift
 create mode 100644 Sources/SparkConnect/relations.pb.swift
 create mode 100644 Sources/SparkConnect/types.grpc.swift
 create mode 100644 Sources/SparkConnect/types.pb.swift

diff --git a/.github/.licenserc.yaml b/.github/.licenserc.yaml
index 2ab49a4..27bf15a 100644
--- a/.github/.licenserc.yaml
+++ b/.github/.licenserc.yaml
@@ -15,5 +15,6 @@ header:
     - '.asf.yaml'
     - '.nojekyll'
     - 'Package.swift'
+    - '**/*pb.swift'
 
 comment: on-failure

diff --git a/Sources/SparkConnect/base.grpc.swift b/Sources/SparkConnect/base.grpc.swift
new file mode 100644
index 0000000..85e84c4
--- /dev/null
+++ b/Sources/SparkConnect/base.grpc.swift
@@ -0,0 +1,2437 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+//    http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// DO NOT EDIT.
+// swift-format-ignore-file
+//
+// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
+// Source: spark/connect/base.proto
+//
+// For information on using the generated types, please see the documentation:
+// https://github.com/grpc/grpc-swift
+
+import GRPCCore
+import GRPCProtobuf
+import SwiftProtobuf
+
+// MARK: - spark.connect.SparkConnectService
+
+/// Namespace containing generated types for the "spark.connect.SparkConnectService" service.
+internal enum Spark_Connect_SparkConnectService {
+    /// Service descriptor for the "spark.connect.SparkConnectService" service.
+    internal static let descriptor = GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService")
+    /// Namespace for method metadata.
+    internal enum Method {
+        /// Namespace for "ExecutePlan" metadata.
+        internal enum ExecutePlan {
+            /// Request type for "ExecutePlan".
+            internal typealias Input = Spark_Connect_ExecutePlanRequest
+            /// Response type for "ExecutePlan".
+            internal typealias Output = Spark_Connect_ExecutePlanResponse
+            /// Descriptor for "ExecutePlan".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "ExecutePlan"
+            )
+        }
+        /// Namespace for "AnalyzePlan" metadata.
+        internal enum AnalyzePlan {
+            /// Request type for "AnalyzePlan".
+            internal typealias Input = Spark_Connect_AnalyzePlanRequest
+            /// Response type for "AnalyzePlan".
+            internal typealias Output = Spark_Connect_AnalyzePlanResponse
+            /// Descriptor for "AnalyzePlan".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "AnalyzePlan"
+            )
+        }
+        /// Namespace for "Config" metadata.
+        internal enum Config {
+            /// Request type for "Config".
+            internal typealias Input = Spark_Connect_ConfigRequest
+            /// Response type for "Config".
+            internal typealias Output = Spark_Connect_ConfigResponse
+            /// Descriptor for "Config".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "Config"
+            )
+        }
+        /// Namespace for "AddArtifacts" metadata.
+        internal enum AddArtifacts {
+            /// Request type for "AddArtifacts".
+            internal typealias Input = Spark_Connect_AddArtifactsRequest
+            /// Response type for "AddArtifacts".
+            internal typealias Output = Spark_Connect_AddArtifactsResponse
+            /// Descriptor for "AddArtifacts".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "AddArtifacts"
+            )
+        }
+        /// Namespace for "ArtifactStatus" metadata.
+        internal enum ArtifactStatus {
+            /// Request type for "ArtifactStatus".
+            internal typealias Input = Spark_Connect_ArtifactStatusesRequest
+            /// Response type for "ArtifactStatus".
+            internal typealias Output = Spark_Connect_ArtifactStatusesResponse
+            /// Descriptor for "ArtifactStatus".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "ArtifactStatus"
+            )
+        }
+        /// Namespace for "Interrupt" metadata.
+        internal enum Interrupt {
+            /// Request type for "Interrupt".
+            internal typealias Input = Spark_Connect_InterruptRequest
+            /// Response type for "Interrupt".
+            internal typealias Output = Spark_Connect_InterruptResponse
+            /// Descriptor for "Interrupt".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "Interrupt"
+            )
+        }
+        /// Namespace for "ReattachExecute" metadata.
+        internal enum ReattachExecute {
+            /// Request type for "ReattachExecute".
+            internal typealias Input = Spark_Connect_ReattachExecuteRequest
+            /// Response type for "ReattachExecute".
+            internal typealias Output = Spark_Connect_ExecutePlanResponse
+            /// Descriptor for "ReattachExecute".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "ReattachExecute"
+            )
+        }
+        /// Namespace for "ReleaseExecute" metadata.
+        internal enum ReleaseExecute {
+            /// Request type for "ReleaseExecute".
+            internal typealias Input = Spark_Connect_ReleaseExecuteRequest
+            /// Response type for "ReleaseExecute".
+            internal typealias Output = Spark_Connect_ReleaseExecuteResponse
+            /// Descriptor for "ReleaseExecute".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "ReleaseExecute"
+            )
+        }
+        /// Namespace for "ReleaseSession" metadata.
+        internal enum ReleaseSession {
+            /// Request type for "ReleaseSession".
+            internal typealias Input = Spark_Connect_ReleaseSessionRequest
+            /// Response type for "ReleaseSession".
+            internal typealias Output = Spark_Connect_ReleaseSessionResponse
+            /// Descriptor for "ReleaseSession".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "ReleaseSession"
+            )
+        }
+        /// Namespace for "FetchErrorDetails" metadata.
+        internal enum FetchErrorDetails {
+            /// Request type for "FetchErrorDetails".
+            internal typealias Input = Spark_Connect_FetchErrorDetailsRequest
+            /// Response type for "FetchErrorDetails".
+            internal typealias Output = Spark_Connect_FetchErrorDetailsResponse
+            /// Descriptor for "FetchErrorDetails".
+            internal static let descriptor = GRPCCore.MethodDescriptor(
+                service: GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService"),
+                method: "FetchErrorDetails"
+            )
+        }
+        /// Descriptors for all methods in the "spark.connect.SparkConnectService" service.
+        internal static let descriptors: [GRPCCore.MethodDescriptor] = [
+            ExecutePlan.descriptor,
+            AnalyzePlan.descriptor,
+            Config.descriptor,
+            AddArtifacts.descriptor,
+            ArtifactStatus.descriptor,
+            Interrupt.descriptor,
+            ReattachExecute.descriptor,
+            ReleaseExecute.descriptor,
+            ReleaseSession.descriptor,
+            FetchErrorDetails.descriptor
+        ]
+    }
+}
+
+extension GRPCCore.ServiceDescriptor {
+    /// Service descriptor for the "spark.connect.SparkConnectService" service.
+    internal static let spark_connect_SparkConnectService = GRPCCore.ServiceDescriptor(fullyQualifiedService: "spark.connect.SparkConnectService")
+}
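A quick orientation before the server and client sections: the `Method` namespace above exists so that request types, response types, and wire-level names stay linked in one place. The sketch below is illustrative only; it assumes `MethodDescriptor` exposes the fully-qualified name as a string property, which is part of GRPCCore rather than this patch.

```swift
// Each generated method namespace ties the protobuf message types to the
// descriptor that the router and transports match on.
let descriptor = Spark_Connect_SparkConnectService.Method.ExecutePlan.descriptor
print(descriptor.fullyQualifiedMethod)  // "spark.connect.SparkConnectService/ExecutePlan"

// The Input/Output typealiases let helper code stay in sync with the proto:
typealias PlanRequest = Spark_Connect_SparkConnectService.Method.ExecutePlan.Input
let request = PlanRequest()  // same type as Spark_Connect_ExecutePlanRequest()
```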
+// MARK: spark.connect.SparkConnectService (server)
+
+extension Spark_Connect_SparkConnectService {
+    /// Streaming variant of the service protocol for the "spark.connect.SparkConnectService" service.
+    ///
+    /// This protocol is the lowest level of the service protocols generated for this service,
+    /// giving you the most flexibility over the implementation of your service. This comes at
+    /// the cost of more verbose and less strict APIs. Each RPC requires you to implement it in
+    /// terms of a request stream and response stream. Where only a single request or response
+    /// message is expected, you are responsible for ensuring that this invariant is maintained.
+    ///
+    /// Where possible, prefer using the stricter, less-verbose ``ServiceProtocol``
+    /// or ``SimpleServiceProtocol`` instead.
+    ///
+    /// > Source IDL Documentation:
+    /// >
+    /// > Main interface for the SparkConnect service.
+    internal protocol StreamingServiceProtocol: GRPCCore.RegistrableRPCService {
+        /// Handle the "ExecutePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Executes a request that contains the query and returns a stream of [[Response]].
+        /// >
+        /// > It is guaranteed that there is at least one ARROW batch returned even if the result set is empty.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_ExecutePlanRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ExecutePlanResponse` messages.
+        func executePlan(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_ExecutePlanRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse>
+
+        /// Handle the "AnalyzePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Analyzes a query and returns an [[AnalyzeResponse]] containing metadata about the query.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_AnalyzePlanRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_AnalyzePlanResponse` messages.
+        func analyzePlan(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_AnalyzePlanRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_AnalyzePlanResponse>
+
+        /// Handle the "Config" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Updates or fetches the configurations and returns a [[ConfigResponse]] containing the result.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_ConfigRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ConfigResponse` messages.
+        func config(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_ConfigRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ConfigResponse>
+
+        /// Handle the "AddArtifacts" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Adds artifacts to the session and returns an [[AddArtifactsResponse]] containing metadata about
+        /// > the added artifacts.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_AddArtifactsRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_AddArtifactsResponse` messages.
+        func addArtifacts(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_AddArtifactsRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_AddArtifactsResponse>
+
+        /// Handle the "ArtifactStatus" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Checks statuses of artifacts in the session and returns them in an [[ArtifactStatusesResponse]].
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_ArtifactStatusesRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ArtifactStatusesResponse` messages.
+        func artifactStatus(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_ArtifactStatusesRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ArtifactStatusesResponse>
+
+        /// Handle the "Interrupt" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Interrupts running executions.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_InterruptRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_InterruptResponse` messages.
+        func interrupt(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_InterruptRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_InterruptResponse>
+
+        /// Handle the "ReattachExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Reattach to an existing reattachable execution.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > If the ExecutePlanResponse stream ends without a ResultComplete message, there is more to
+        /// > continue. If there is a ResultComplete, the client should use ReleaseExecute with
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_ReattachExecuteRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ExecutePlanResponse` messages.
+        func reattachExecute(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_ReattachExecuteRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse>
+
+        /// Handle the "ReleaseExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a reattachable execution, or parts thereof.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > Non-reattachable executions are released automatically and immediately after the ExecutePlan
+        /// > RPC and ReleaseExecute may not be used.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_ReleaseExecuteRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ReleaseExecuteResponse` messages.
+        func releaseExecute(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_ReleaseExecuteRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ReleaseExecuteResponse>
+
+        /// Handle the "ReleaseSession" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a session.
+        /// > All the executions in the session will be released. Any further requests for the session with
+        /// > that session_id for the given user_id will fail. If the session didn't exist or was already
+        /// > released, this is a noop.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_ReleaseSessionRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ReleaseSessionResponse` messages.
+        func releaseSession(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_ReleaseSessionRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ReleaseSessionResponse>
+
+        /// Handle the "FetchErrorDetails" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > FetchErrorDetails retrieves the matched exception with details based on a provided error id.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_FetchErrorDetailsRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_FetchErrorDetailsResponse` messages.
+        func fetchErrorDetails(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_FetchErrorDetailsRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_FetchErrorDetailsResponse>
+    }
+
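For orientation, here is a minimal sketch of what a hand-written streaming-tier handler could look like; it mirrors the shapes declared above. `makeResponses(for:)` is a hypothetical helper, not part of the generated code, and ExecutePlan is logically unary-in/stream-out even though this tier exposes both sides as streams.

```swift
func executePlan(
    request: GRPCCore.StreamingServerRequest<Spark_Connect_ExecutePlanRequest>,
    context: GRPCCore.ServerContext
) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse> {
    // The streaming tier hands us a request stream even for a single-message
    // RPC; enforcing the "exactly one request" invariant is our job here.
    guard let plan = try await request.messages.first(where: { _ in true }) else {
        throw GRPCCore.RPCError(code: .invalidArgument, message: "missing ExecutePlanRequest")
    }
    return GRPCCore.StreamingServerResponse(
        metadata: [:],
        producer: { writer in
            for response in try await makeResponses(for: plan) {  // hypothetical helper
                try await writer.write(response)
            }
            return [:]  // trailing metadata
        }
    )
}
```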
+    /// Service protocol for the "spark.connect.SparkConnectService" service.
+    ///
+    /// This protocol is higher level than ``StreamingServiceProtocol`` but lower level than
+    /// the ``SimpleServiceProtocol``; it provides access to request and response metadata and
+    /// trailing response metadata. If you don't need these then consider using
+    /// the ``SimpleServiceProtocol``. If you need fine-grained control over your RPCs then
+    /// use ``StreamingServiceProtocol``.
+    ///
+    /// > Source IDL Documentation:
+    /// >
+    /// > Main interface for the SparkConnect service.
+    internal protocol ServiceProtocol: Spark_Connect_SparkConnectService.StreamingServiceProtocol {
+        /// Handle the "ExecutePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Executes a request that contains the query and returns a stream of [[Response]].
+        /// >
+        /// > It is guaranteed that there is at least one ARROW batch returned even if the result set is empty.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ExecutePlanRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ExecutePlanResponse` messages.
+        func executePlan(
+            request: GRPCCore.ServerRequest<Spark_Connect_ExecutePlanRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse>
+
+        /// Handle the "AnalyzePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Analyzes a query and returns an [[AnalyzeResponse]] containing metadata about the query.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_AnalyzePlanRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_AnalyzePlanResponse` message.
+        func analyzePlan(
+            request: GRPCCore.ServerRequest<Spark_Connect_AnalyzePlanRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_AnalyzePlanResponse>
+
+        /// Handle the "Config" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Updates or fetches the configurations and returns a [[ConfigResponse]] containing the result.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ConfigRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_ConfigResponse` message.
+        func config(
+            request: GRPCCore.ServerRequest<Spark_Connect_ConfigRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ConfigResponse>
+
+        /// Handle the "AddArtifacts" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Adds artifacts to the session and returns an [[AddArtifactsResponse]] containing metadata about
+        /// > the added artifacts.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request of `Spark_Connect_AddArtifactsRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_AddArtifactsResponse` message.
+        func addArtifacts(
+            request: GRPCCore.StreamingServerRequest<Spark_Connect_AddArtifactsRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_AddArtifactsResponse>
+
+        /// Handle the "ArtifactStatus" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Checks statuses of artifacts in the session and returns them in an [[ArtifactStatusesResponse]].
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ArtifactStatusesRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_ArtifactStatusesResponse` message.
+        func artifactStatus(
+            request: GRPCCore.ServerRequest<Spark_Connect_ArtifactStatusesRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ArtifactStatusesResponse>
+
+        /// Handle the "Interrupt" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Interrupts running executions.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_InterruptRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_InterruptResponse` message.
+        func interrupt(
+            request: GRPCCore.ServerRequest<Spark_Connect_InterruptRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_InterruptResponse>
+
+        /// Handle the "ReattachExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Reattach to an existing reattachable execution.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > If the ExecutePlanResponse stream ends without a ResultComplete message, there is more to
+        /// > continue. If there is a ResultComplete, the client should use ReleaseExecute with
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ReattachExecuteRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A streaming response of `Spark_Connect_ExecutePlanResponse` messages.
+        func reattachExecute(
+            request: GRPCCore.ServerRequest<Spark_Connect_ReattachExecuteRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse>
+
+        /// Handle the "ReleaseExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a reattachable execution, or parts thereof.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > Non-reattachable executions are released automatically and immediately after the ExecutePlan
+        /// > RPC and ReleaseExecute may not be used.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ReleaseExecuteRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_ReleaseExecuteResponse` message.
+        func releaseExecute(
+            request: GRPCCore.ServerRequest<Spark_Connect_ReleaseExecuteRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ReleaseExecuteResponse>
+
+        /// Handle the "ReleaseSession" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a session.
+        /// > All the executions in the session will be released. Any further requests for the session with
+        /// > that session_id for the given user_id will fail. If the session didn't exist or was already
+        /// > released, this is a noop.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ReleaseSessionRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_ReleaseSessionResponse` message.
+        func releaseSession(
+            request: GRPCCore.ServerRequest<Spark_Connect_ReleaseSessionRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ReleaseSessionResponse>
+
+        /// Handle the "FetchErrorDetails" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > FetchErrorDetails retrieves the matched exception with details based on a provided error id.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_FetchErrorDetailsRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A response containing a single `Spark_Connect_FetchErrorDetailsResponse` message.
+        func fetchErrorDetails(
+            request: GRPCCore.ServerRequest<Spark_Connect_FetchErrorDetailsRequest>,
+            context: GRPCCore.ServerContext
+        ) async throws -> GRPCCore.ServerResponse<Spark_Connect_FetchErrorDetailsResponse>
+    }
+
+    /// Simple service protocol for the "spark.connect.SparkConnectService" service.
+    ///
+    /// This is the highest level protocol for the service. The API is the easiest to use but
+    /// doesn't provide access to request or response metadata. If you need access to these
+    /// then use ``ServiceProtocol`` instead.
+    ///
+    /// > Source IDL Documentation:
+    /// >
+    /// > Main interface for the SparkConnect service.
+    internal protocol SimpleServiceProtocol: Spark_Connect_SparkConnectService.ServiceProtocol {
+        /// Handle the "ExecutePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Executes a request that contains the query and returns a stream of [[Response]].
+        /// >
+        /// > It is guaranteed that there is at least one ARROW batch returned even if the result set is empty.
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_ExecutePlanRequest` message.
+        ///   - response: A response stream of `Spark_Connect_ExecutePlanResponse` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        func executePlan(
+            request: Spark_Connect_ExecutePlanRequest,
+            response: GRPCCore.RPCWriter<Spark_Connect_ExecutePlanResponse>,
+            context: GRPCCore.ServerContext
+        ) async throws
+
+        /// Handle the "AnalyzePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Analyzes a query and returns an [[AnalyzeResponse]] containing metadata about the query.
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_AnalyzePlanRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_AnalyzePlanResponse` to respond with.
+        func analyzePlan(
+            request: Spark_Connect_AnalyzePlanRequest,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_AnalyzePlanResponse
+
+        /// Handle the "Config" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Updates or fetches the configurations and returns a [[ConfigResponse]] containing the result.
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_ConfigRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_ConfigResponse` to respond with.
+        func config(
+            request: Spark_Connect_ConfigRequest,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_ConfigResponse
+
+        /// Handle the "AddArtifacts" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Adds artifacts to the session and returns an [[AddArtifactsResponse]] containing metadata about
+        /// > the added artifacts.
+        ///
+        /// - Parameters:
+        ///   - request: A stream of `Spark_Connect_AddArtifactsRequest` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_AddArtifactsResponse` to respond with.
+        func addArtifacts(
+            request: GRPCCore.RPCAsyncSequence<Spark_Connect_AddArtifactsRequest, any Swift.Error>,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_AddArtifactsResponse
+
+        /// Handle the "ArtifactStatus" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Checks statuses of artifacts in the session and returns them in an [[ArtifactStatusesResponse]].
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_ArtifactStatusesRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_ArtifactStatusesResponse` to respond with.
+        func artifactStatus(
+            request: Spark_Connect_ArtifactStatusesRequest,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_ArtifactStatusesResponse
+
+        /// Handle the "Interrupt" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Interrupts running executions.
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_InterruptRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_InterruptResponse` to respond with.
+        func interrupt(
+            request: Spark_Connect_InterruptRequest,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_InterruptResponse
+
+        /// Handle the "ReattachExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Reattach to an existing reattachable execution.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > If the ExecutePlanResponse stream ends without a ResultComplete message, there is more to
+        /// > continue. If there is a ResultComplete, the client should use ReleaseExecute with
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_ReattachExecuteRequest` message.
+        ///   - response: A response stream of `Spark_Connect_ExecutePlanResponse` messages.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        func reattachExecute(
+            request: Spark_Connect_ReattachExecuteRequest,
+            response: GRPCCore.RPCWriter<Spark_Connect_ExecutePlanResponse>,
+            context: GRPCCore.ServerContext
+        ) async throws
+
+        /// Handle the "ReleaseExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a reattachable execution, or parts thereof.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > Non-reattachable executions are released automatically and immediately after the ExecutePlan
+        /// > RPC and ReleaseExecute may not be used.
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_ReleaseExecuteRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_ReleaseExecuteResponse` to respond with.
+        func releaseExecute(
+            request: Spark_Connect_ReleaseExecuteRequest,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_ReleaseExecuteResponse
+
+        /// Handle the "ReleaseSession" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a session.
+        /// > All the executions in the session will be released. Any further requests for the session with
+        /// > that session_id for the given user_id will fail. If the session didn't exist or was already
+        /// > released, this is a noop.
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_ReleaseSessionRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_ReleaseSessionResponse` to respond with.
+        func releaseSession(
+            request: Spark_Connect_ReleaseSessionRequest,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_ReleaseSessionResponse
+
+        /// Handle the "FetchErrorDetails" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > FetchErrorDetails retrieves the matched exception with details based on a provided error id.
+        ///
+        /// - Parameters:
+        ///   - request: A `Spark_Connect_FetchErrorDetailsRequest` message.
+        ///   - context: Context providing information about the RPC.
+        /// - Throws: Any error which occurred during the processing of the request. Thrown errors
+        ///     of type `RPCError` are mapped to appropriate statuses. All other errors are converted
+        ///     to an internal error.
+        /// - Returns: A `Spark_Connect_FetchErrorDetailsResponse` to respond with.
+        func fetchErrorDetails(
+            request: Spark_Connect_FetchErrorDetailsRequest,
+            context: GRPCCore.ServerContext
+        ) async throws -> Spark_Connect_FetchErrorDetailsResponse
+    }
+}
+
+// Default implementation of 'registerMethods(with:)'.
+extension Spark_Connect_SparkConnectService.StreamingServiceProtocol {
+    internal func registerMethods<Transport>(with router: inout GRPCCore.RPCRouter<Transport>) where Transport: GRPCCore.ServerTransport {
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.ExecutePlan.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_ExecutePlanRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_ExecutePlanResponse>(),
+            handler: { request, context in
+                try await self.executePlan(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.AnalyzePlan.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_AnalyzePlanRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_AnalyzePlanResponse>(),
+            handler: { request, context in
+                try await self.analyzePlan(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.Config.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_ConfigRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_ConfigResponse>(),
+            handler: { request, context in
+                try await self.config(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.AddArtifacts.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_AddArtifactsRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_AddArtifactsResponse>(),
+            handler: { request, context in
+                try await self.addArtifacts(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.ArtifactStatus.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_ArtifactStatusesRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_ArtifactStatusesResponse>(),
+            handler: { request, context in
+                try await self.artifactStatus(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.Interrupt.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_InterruptRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_InterruptResponse>(),
+            handler: { request, context in
+                try await self.interrupt(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.ReattachExecute.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_ReattachExecuteRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_ExecutePlanResponse>(),
+            handler: { request, context in
+                try await self.reattachExecute(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.ReleaseExecute.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_ReleaseExecuteRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_ReleaseExecuteResponse>(),
+            handler: { request, context in
+                try await self.releaseExecute(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.ReleaseSession.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_ReleaseSessionRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_ReleaseSessionResponse>(),
+            handler: { request, context in
+                try await self.releaseSession(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+        router.registerHandler(
+            forMethod: Spark_Connect_SparkConnectService.Method.FetchErrorDetails.descriptor,
+            deserializer: GRPCProtobuf.ProtobufDeserializer<Spark_Connect_FetchErrorDetailsRequest>(),
+            serializer: GRPCProtobuf.ProtobufSerializer<Spark_Connect_FetchErrorDetailsResponse>(),
+            handler: { request, context in
+                try await self.fetchErrorDetails(
+                    request: request,
+                    context: context
+                )
+            }
+        )
+    }
+}
+
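Taken together, the three tiers let an implementer choose a level of control, and `registerMethods(with:)` above is what plugs any of them into a server's router. A sketch of the simplest path follows, with all but one RPC elided, so this is a skeleton rather than a compilable conformance, and the server construction shown in the trailing comment is an assumption about the surrounding packages:

```swift
struct SparkConnectSkeleton: Spark_Connect_SparkConnectService.SimpleServiceProtocol {
    // Unary RPCs at this tier are plain message-in/message-out functions; the
    // generated default implementations bridge them down to the streaming tier.
    func config(
        request: Spark_Connect_ConfigRequest,
        context: GRPCCore.ServerContext
    ) async throws -> Spark_Connect_ConfigResponse {
        var response = Spark_Connect_ConfigResponse()
        response.sessionID = request.sessionID  // echo the session; no real work
        return response
    }

    // ... the other nine RPCs (executePlan, addArtifacts, interrupt, etc.)
    // are required by the protocol and elided here ...
}

// Registration is then one line against a concrete server, for example:
// let server = GRPCCore.GRPCServer(transport: someTransport, services: [SparkConnectSkeleton()])
```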
+// Default implementation of streaming methods from 'StreamingServiceProtocol'.
+extension Spark_Connect_SparkConnectService.ServiceProtocol {
+    internal func executePlan(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_ExecutePlanRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse> {
+        let response = try await self.executePlan(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return response
+    }
+
+    internal func analyzePlan(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_AnalyzePlanRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_AnalyzePlanResponse> {
+        let response = try await self.analyzePlan(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+
+    internal func config(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_ConfigRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ConfigResponse> {
+        let response = try await self.config(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+
+    internal func addArtifacts(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_AddArtifactsRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_AddArtifactsResponse> {
+        let response = try await self.addArtifacts(
+            request: request,
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+
+    internal func artifactStatus(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_ArtifactStatusesRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ArtifactStatusesResponse> {
+        let response = try await self.artifactStatus(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+
+    internal func interrupt(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_InterruptRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_InterruptResponse> {
+        let response = try await self.interrupt(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+
+    internal func reattachExecute(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_ReattachExecuteRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse> {
+        let response = try await self.reattachExecute(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return response
+    }
+
+    internal func releaseExecute(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_ReleaseExecuteRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ReleaseExecuteResponse> {
+        let response = try await self.releaseExecute(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+
+    internal func releaseSession(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_ReleaseSessionRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ReleaseSessionResponse> {
+        let response = try await self.releaseSession(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+
+    internal func fetchErrorDetails(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_FetchErrorDetailsRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_FetchErrorDetailsResponse> {
+        let response = try await self.fetchErrorDetails(
+            request: GRPCCore.ServerRequest(stream: request),
+            context: context
+        )
+        return GRPCCore.StreamingServerResponse(single: response)
+    }
+}
+
+// Default implementation of methods from 'ServiceProtocol'.
+extension Spark_Connect_SparkConnectService.SimpleServiceProtocol {
+    internal func executePlan(
+        request: GRPCCore.ServerRequest<Spark_Connect_ExecutePlanRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse> {
+        return GRPCCore.StreamingServerResponse(
+            metadata: [:],
+            producer: { writer in
+                try await self.executePlan(
+                    request: request.message,
+                    response: writer,
+                    context: context
+                )
+                return [:]
+            }
+        )
+    }
+
+    internal func analyzePlan(
+        request: GRPCCore.ServerRequest<Spark_Connect_AnalyzePlanRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_AnalyzePlanResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.analyzePlan(
+                request: request.message,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+
+    internal func config(
+        request: GRPCCore.ServerRequest<Spark_Connect_ConfigRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ConfigResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.config(
+                request: request.message,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+
+    internal func addArtifacts(
+        request: GRPCCore.StreamingServerRequest<Spark_Connect_AddArtifactsRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_AddArtifactsResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.addArtifacts(
+                request: request.messages,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+
+    internal func artifactStatus(
+        request: GRPCCore.ServerRequest<Spark_Connect_ArtifactStatusesRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ArtifactStatusesResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.artifactStatus(
+                request: request.message,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+
+    internal func interrupt(
+        request: GRPCCore.ServerRequest<Spark_Connect_InterruptRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_InterruptResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.interrupt(
+                request: request.message,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+
+    internal func reattachExecute(
+        request: GRPCCore.ServerRequest<Spark_Connect_ReattachExecuteRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.StreamingServerResponse<Spark_Connect_ExecutePlanResponse> {
+        return GRPCCore.StreamingServerResponse(
+            metadata: [:],
+            producer: { writer in
+                try await self.reattachExecute(
+                    request: request.message,
+                    response: writer,
+                    context: context
+                )
+                return [:]
+            }
+        )
+    }
+
+    internal func releaseExecute(
+        request: GRPCCore.ServerRequest<Spark_Connect_ReleaseExecuteRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ReleaseExecuteResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.releaseExecute(
+                request: request.message,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+
+    internal func releaseSession(
+        request: GRPCCore.ServerRequest<Spark_Connect_ReleaseSessionRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_ReleaseSessionResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.releaseSession(
+                request: request.message,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+
+    internal func fetchErrorDetails(
+        request: GRPCCore.ServerRequest<Spark_Connect_FetchErrorDetailsRequest>,
+        context: GRPCCore.ServerContext
+    ) async throws -> GRPCCore.ServerResponse<Spark_Connect_FetchErrorDetailsResponse> {
+        return GRPCCore.ServerResponse(
+            message: try await self.fetchErrorDetails(
+                request: request.message,
+                context: context
+            ),
+            metadata: [:]
+        )
+    }
+}
+
+// MARK: spark.connect.SparkConnectService (client)
+
+extension Spark_Connect_SparkConnectService {
+    /// Generated client protocol for the "spark.connect.SparkConnectService" service.
+    ///
+    /// You don't need to implement this protocol directly, use the generated
+    /// implementation, ``Client``.
+    ///
+    /// > Source IDL Documentation:
+    /// >
+    /// > Main interface for the SparkConnect service.
+    internal protocol ClientProtocol: Sendable {
+        /// Call the "ExecutePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Executes a request that contains the query and returns a stream of [[Response]].
+        /// >
+        /// > It is guaranteed that there is at least one ARROW batch returned even if the result set is empty.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ExecutePlanRequest` message.
+        ///   - serializer: A serializer for `Spark_Connect_ExecutePlanRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_ExecutePlanResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func executePlan<Result>(
+            request: GRPCCore.ClientRequest<Spark_Connect_ExecutePlanRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_ExecutePlanRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_ExecutePlanResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse<Spark_Connect_ExecutePlanResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "AnalyzePlan" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Analyzes a query and returns an [[AnalyzeResponse]] containing metadata about the query.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_AnalyzePlanRequest` message.
+        ///   - serializer: A serializer for `Spark_Connect_AnalyzePlanRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_AnalyzePlanResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func analyzePlan<Result>(
+            request: GRPCCore.ClientRequest<Spark_Connect_AnalyzePlanRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_AnalyzePlanRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_AnalyzePlanResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_AnalyzePlanResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "Config" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Updates or fetches the configurations and returns a [[ConfigResponse]] containing the result.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ConfigRequest` message.
+        ///   - serializer: A serializer for `Spark_Connect_ConfigRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_ConfigResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func config<Result>(
+            request: GRPCCore.ClientRequest<Spark_Connect_ConfigRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_ConfigRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_ConfigResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_ConfigResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "AddArtifacts" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Adds artifacts to the session and returns an [[AddArtifactsResponse]] containing metadata about
+        /// > the added artifacts.
+        ///
+        /// - Parameters:
+        ///   - request: A streaming request producing `Spark_Connect_AddArtifactsRequest` messages.
+        ///   - serializer: A serializer for `Spark_Connect_AddArtifactsRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_AddArtifactsResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func addArtifacts<Result>(
+            request: GRPCCore.StreamingClientRequest<Spark_Connect_AddArtifactsRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_AddArtifactsRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_AddArtifactsResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_AddArtifactsResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "ArtifactStatus" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Checks statuses of artifacts in the session and returns them in an [[ArtifactStatusesResponse]].
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ArtifactStatusesRequest` message.
+        ///   - serializer: A serializer for `Spark_Connect_ArtifactStatusesRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_ArtifactStatusesResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func artifactStatus<Result>(
+            request: GRPCCore.ClientRequest<Spark_Connect_ArtifactStatusesRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_ArtifactStatusesRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_ArtifactStatusesResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_ArtifactStatusesResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "Interrupt" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Interrupts running executions.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_InterruptRequest` message.
+        ///   - serializer: A serializer for `Spark_Connect_InterruptRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_InterruptResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func interrupt<Result>(
+            request: GRPCCore.ClientRequest<Spark_Connect_InterruptRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_InterruptRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_InterruptResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_InterruptResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "ReattachExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Reattach to an existing reattachable execution.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > If the ExecutePlanResponse stream ends without a ResultComplete message, there is more to
+        /// > continue. If there is a ResultComplete, the client should use ReleaseExecute with
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ReattachExecuteRequest` message.
+        ///   - serializer: A serializer for `Spark_Connect_ReattachExecuteRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_ExecutePlanResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func reattachExecute<Result>(
+            request: GRPCCore.ClientRequest<Spark_Connect_ReattachExecuteRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_ReattachExecuteRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_ExecutePlanResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse<Spark_Connect_ExecutePlanResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "ReleaseExecute" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a reattachable execution, or parts thereof.
+        /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true.
+        /// > Non-reattachable executions are released automatically and immediately after the ExecutePlan
+        /// > RPC and ReleaseExecute may not be used.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ReleaseExecuteRequest` message.
+        ///   - serializer: A serializer for `Spark_Connect_ReleaseExecuteRequest` messages.
+        ///   - deserializer: A deserializer for `Spark_Connect_ReleaseExecuteResponse` messages.
+        ///   - options: Options to apply to this RPC.
+        ///   - handleResponse: A closure which handles the response, the result of which is
+        ///       returned to the caller. Returning from the closure will cancel the RPC if it
+        ///       hasn't already finished.
+        /// - Returns: The result of `handleResponse`.
+        func releaseExecute<Result>(
+            request: GRPCCore.ClientRequest<Spark_Connect_ReleaseExecuteRequest>,
+            serializer: some GRPCCore.MessageSerializer<Spark_Connect_ReleaseExecuteRequest>,
+            deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_ReleaseExecuteResponse>,
+            options: GRPCCore.CallOptions,
+            onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_ReleaseExecuteResponse>) async throws -> Result
+        ) async throws -> Result where Result: Sendable
+
+        /// Call the "ReleaseSession" method.
+        ///
+        /// > Source IDL Documentation:
+        /// >
+        /// > Release a session.
+        /// > All the executions in the session will be released. Any further requests for the session with
+        /// > that session_id for the given user_id will fail. If the session didn't exist or was already
+        /// > released, this is a noop.
+        ///
+        /// - Parameters:
+        ///   - request: A request containing a single `Spark_Connect_ReleaseSessionRequest` message.
+ /// - serializer: A serializer for `Spark_Connect_ReleaseSessionRequest` messages.
+ /// - deserializer: A deserializer for `Spark_Connect_ReleaseSessionResponse` messages.
+ /// - options: Options to apply to this RPC.
+ /// - handleResponse: A closure which handles the response, the result of which is
+ /// returned to the caller. Returning from the closure will cancel the RPC if it
+ /// hasn't already finished.
+ /// - Returns: The result of `handleResponse`.
+ func releaseSession<Result>(
+ request: GRPCCore.ClientRequest<Spark_Connect_ReleaseSessionRequest>,
+ serializer: some GRPCCore.MessageSerializer<Spark_Connect_ReleaseSessionRequest>,
+ deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_ReleaseSessionResponse>,
+ options: GRPCCore.CallOptions,
+ onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_ReleaseSessionResponse>) async throws -> Result
+ ) async throws -> Result where Result: Sendable
+
+ /// Call the "FetchErrorDetails" method.
+ ///
+ /// > Source IDL Documentation:
+ /// >
+ /// > FetchErrorDetails retrieves the matched exception with details based on a provided error id.
+ ///
+ /// - Parameters:
+ /// - request: A request containing a single `Spark_Connect_FetchErrorDetailsRequest` message.
+ /// - serializer: A serializer for `Spark_Connect_FetchErrorDetailsRequest` messages.
+ /// - deserializer: A deserializer for `Spark_Connect_FetchErrorDetailsResponse` messages.
+ /// - options: Options to apply to this RPC.
+ /// - handleResponse: A closure which handles the response, the result of which is
+ /// returned to the caller. Returning from the closure will cancel the RPC if it
+ /// hasn't already finished.
+ /// - Returns: The result of `handleResponse`.
+ func fetchErrorDetails<Result>(
+ request: GRPCCore.ClientRequest<Spark_Connect_FetchErrorDetailsRequest>,
+ serializer: some GRPCCore.MessageSerializer<Spark_Connect_FetchErrorDetailsRequest>,
+ deserializer: some GRPCCore.MessageDeserializer<Spark_Connect_FetchErrorDetailsResponse>,
+ options: GRPCCore.CallOptions,
+ onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse<Spark_Connect_FetchErrorDetailsResponse>) async throws -> Result
+ ) async throws -> Result where Result: Sendable
+ }
+
+ /// Generated client for the "spark.connect.SparkConnectService" service.
+ ///
+ /// The ``Client`` provides an implementation of ``ClientProtocol`` which wraps
+ /// a `GRPCCore.GRPCClient`. The underlying `GRPCClient` provides the long-lived
+ /// means of communication with the remote peer.
+ ///
+ /// > Source IDL Documentation:
+ /// >
+ /// > Main interface for the SparkConnect service.
+ internal struct Client<Transport>: ClientProtocol where Transport: GRPCCore.ClientTransport {
+ private let client: GRPCCore.GRPCClient<Transport>
+
+ /// Creates a new client wrapping the provided `GRPCCore.GRPCClient`.
+ ///
+ /// - Parameters:
+ /// - client: A `GRPCCore.GRPCClient` providing a communication channel to the service.
+ internal init(wrapping client: GRPCCore.GRPCClient<Transport>) {
+ self.client = client
+ }
+
+ /// Call the "ExecutePlan" method.
+ ///
+ /// > Source IDL Documentation:
+ /// >
+ /// > Executes a request that contains the query and returns a stream of [[Response]].
+ /// >
+ /// > It is guaranteed that there is at least one ARROW batch returned even if the result set is empty.
+ ///
+ /// - Parameters:
+ /// - request: A request containing a single `Spark_Connect_ExecutePlanRequest` message.
+ /// - serializer: A serializer for `Spark_Connect_ExecutePlanRequest` messages.
+ /// - deserializer: A deserializer for `Spark_Connect_ExecutePlanResponse` messages.
+ /// - options: Options to apply to this RPC.
+ /// - handleResponse: A closure which handles the response, the result of which is
+ /// returned to the caller.
Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func executePlan( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse) async throws -> Result + ) async throws -> Result where Result: Sendable { + try await self.client.serverStreaming( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.ExecutePlan.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "AnalyzePlan" method. + /// + /// > Source IDL Documentation: + /// > + /// > Analyzes a query and returns a [[AnalyzeResponse]] containing metadata about the query. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_AnalyzePlanRequest` message. + /// - serializer: A serializer for `Spark_Connect_AnalyzePlanRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_AnalyzePlanResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func analyzePlan( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.unary( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.AnalyzePlan.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "Config" method. + /// + /// > Source IDL Documentation: + /// > + /// > Update or fetch the configurations and returns a [[ConfigResponse]] containing the result. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ConfigRequest` message. + /// - serializer: A serializer for `Spark_Connect_ConfigRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_ConfigResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. 
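+ ///
+ /// A minimal usage sketch, assuming an already-configured `GRPCCore.GRPCClient`
+ /// named `grpcClient`; the request fields are left at their defaults here and
+ /// would normally be populated from the session state:
+ ///
+ /// ```swift
+ /// let service = Spark_Connect_SparkConnectService.Client(wrapping: grpcClient)
+ /// let request = Spark_Connect_ConfigRequest()
+ /// // The default `onResponse` handler returns the deserialized message.
+ /// let response = try await service.config(request)
+ /// ```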
+ internal func config( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.unary( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.Config.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "AddArtifacts" method. + /// + /// > Source IDL Documentation: + /// > + /// > Add artifacts to the session and returns a [[AddArtifactsResponse]] containing metadata about + /// > the added artifacts. + /// + /// - Parameters: + /// - request: A streaming request producing `Spark_Connect_AddArtifactsRequest` messages. + /// - serializer: A serializer for `Spark_Connect_AddArtifactsRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_AddArtifactsResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func addArtifacts( + request: GRPCCore.StreamingClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.clientStreaming( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.AddArtifacts.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ArtifactStatus" method. + /// + /// > Source IDL Documentation: + /// > + /// > Check statuses of artifacts in the session and returns them in a [[ArtifactStatusesResponse]] + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ArtifactStatusesRequest` message. + /// - serializer: A serializer for `Spark_Connect_ArtifactStatusesRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_ArtifactStatusesResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. 
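+ ///
+ /// A minimal usage sketch; the `names` field is an assumption based on the
+ /// artifact-status IDL and is not shown in this excerpt:
+ ///
+ /// ```swift
+ /// var request = Spark_Connect_ArtifactStatusesRequest()
+ /// request.names = ["jars/my-udf.jar"]  // assumed field; hypothetical artifact name
+ /// let statuses = try await client.artifactStatus(request)
+ /// ```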
+ internal func artifactStatus( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.unary( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.ArtifactStatus.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "Interrupt" method. + /// + /// > Source IDL Documentation: + /// > + /// > Interrupts running executions + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_InterruptRequest` message. + /// - serializer: A serializer for `Spark_Connect_InterruptRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_InterruptResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func interrupt( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.unary( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.Interrupt.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReattachExecute" method. + /// + /// > Source IDL Documentation: + /// > + /// > Reattach to an existing reattachable execution. + /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true. + /// > If the ExecutePlanResponse stream ends without a ResultComplete message, there is more to + /// > continue. If there is a ResultComplete, the client should use ReleaseExecute with + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ReattachExecuteRequest` message. + /// - serializer: A serializer for `Spark_Connect_ReattachExecuteRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_ExecutePlanResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. 
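+ ///
+ /// A minimal sketch of draining the reattached stream, assuming a
+ /// `Spark_Connect_ReattachExecuteRequest` built elsewhere and the `messages`
+ /// async sequence exposed by `GRPCCore.StreamingClientResponse`:
+ ///
+ /// ```swift
+ /// let batches = try await client.reattachExecute(request) { response in
+ ///   var count = 0
+ ///   for try await _ in response.messages { count += 1 }  // consume each response part
+ ///   return count
+ /// }
+ /// ```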
+ internal func reattachExecute( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse) async throws -> Result + ) async throws -> Result where Result: Sendable { + try await self.client.serverStreaming( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.ReattachExecute.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReleaseExecute" method. + /// + /// > Source IDL Documentation: + /// > + /// > Release an reattachable execution, or parts thereof. + /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true. + /// > Non reattachable executions are released automatically and immediately after the ExecutePlan + /// > RPC and ReleaseExecute may not be used. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ReleaseExecuteRequest` message. + /// - serializer: A serializer for `Spark_Connect_ReleaseExecuteRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_ReleaseExecuteResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func releaseExecute( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.unary( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.ReleaseExecute.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReleaseSession" method. + /// + /// > Source IDL Documentation: + /// > + /// > Release a session. + /// > All the executions in the session will be released. Any further requests for the session with + /// > that session_id for the given user_id will fail. If the session didn't exist or was already + /// > released, this is a noop. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ReleaseSessionRequest` message. + /// - serializer: A serializer for `Spark_Connect_ReleaseSessionRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_ReleaseSessionResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. 
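+ ///
+ /// A minimal usage sketch, assuming the request carries the same `sessionID`
+ /// field as the other request messages:
+ ///
+ /// ```swift
+ /// var request = Spark_Connect_ReleaseSessionRequest()
+ /// request.sessionID = sessionID  // hypothetical UUID string for the session to release
+ /// _ = try await client.releaseSession(request)
+ /// ```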
+ internal func releaseSession( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.unary( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.ReleaseSession.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "FetchErrorDetails" method. + /// + /// > Source IDL Documentation: + /// > + /// > FetchErrorDetails retrieves the matched exception with details based on a provided error id. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_FetchErrorDetailsRequest` message. + /// - serializer: A serializer for `Spark_Connect_FetchErrorDetailsRequest` messages. + /// - deserializer: A deserializer for `Spark_Connect_FetchErrorDetailsResponse` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func fetchErrorDetails( + request: GRPCCore.ClientRequest, + serializer: some GRPCCore.MessageSerializer, + deserializer: some GRPCCore.MessageDeserializer, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.client.unary( + request: request, + descriptor: Spark_Connect_SparkConnectService.Method.FetchErrorDetails.descriptor, + serializer: serializer, + deserializer: deserializer, + options: options, + onResponse: handleResponse + ) + } + } +} + +// Helpers providing default arguments to 'ClientProtocol' methods. +extension Spark_Connect_SparkConnectService.ClientProtocol { + /// Call the "ExecutePlan" method. + /// + /// > Source IDL Documentation: + /// > + /// > Executes a request that contains the query and returns a stream of [[Response]]. + /// > + /// > It is guaranteed that there is at least one ARROW batch returned even if the result set is empty. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ExecutePlanRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func executePlan( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse) async throws -> Result + ) async throws -> Result where Result: Sendable { + try await self.executePlan( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "AnalyzePlan" method. 
+ /// + /// > Source IDL Documentation: + /// > + /// > Analyzes a query and returns a [[AnalyzeResponse]] containing metadata about the query. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_AnalyzePlanRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func analyzePlan( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.analyzePlan( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "Config" method. + /// + /// > Source IDL Documentation: + /// > + /// > Update or fetch the configurations and returns a [[ConfigResponse]] containing the result. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ConfigRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func config( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.config( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "AddArtifacts" method. + /// + /// > Source IDL Documentation: + /// > + /// > Add artifacts to the session and returns a [[AddArtifactsResponse]] containing metadata about + /// > the added artifacts. + /// + /// - Parameters: + /// - request: A streaming request producing `Spark_Connect_AddArtifactsRequest` messages. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func addArtifacts( + request: GRPCCore.StreamingClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.addArtifacts( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ArtifactStatus" method. 
+ /// + /// > Source IDL Documentation: + /// > + /// > Check statuses of artifacts in the session and returns them in a [[ArtifactStatusesResponse]] + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ArtifactStatusesRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func artifactStatus( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.artifactStatus( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "Interrupt" method. + /// + /// > Source IDL Documentation: + /// > + /// > Interrupts running executions + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_InterruptRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func interrupt( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.interrupt( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReattachExecute" method. + /// + /// > Source IDL Documentation: + /// > + /// > Reattach to an existing reattachable execution. + /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true. + /// > If the ExecutePlanResponse stream ends without a ResultComplete message, there is more to + /// > continue. If there is a ResultComplete, the client should use ReleaseExecute with + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ReattachExecuteRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func reattachExecute( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse) async throws -> Result + ) async throws -> Result where Result: Sendable { + try await self.reattachExecute( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReleaseExecute" method. 
+ /// + /// > Source IDL Documentation: + /// > + /// > Release an reattachable execution, or parts thereof. + /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true. + /// > Non reattachable executions are released automatically and immediately after the ExecutePlan + /// > RPC and ReleaseExecute may not be used. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ReleaseExecuteRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func releaseExecute( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.releaseExecute( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReleaseSession" method. + /// + /// > Source IDL Documentation: + /// > + /// > Release a session. + /// > All the executions in the session will be released. Any further requests for the session with + /// > that session_id for the given user_id will fail. If the session didn't exist or was already + /// > released, this is a noop. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_ReleaseSessionRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func releaseSession( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.releaseSession( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } + + /// Call the "FetchErrorDetails" method. + /// + /// > Source IDL Documentation: + /// > + /// > FetchErrorDetails retrieves the matched exception with details based on a provided error id. + /// + /// - Parameters: + /// - request: A request containing a single `Spark_Connect_FetchErrorDetailsRequest` message. + /// - options: Options to apply to this RPC. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. 
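+ ///
+ /// A minimal usage sketch; `errorID` is an assumed field name derived from the
+ /// "provided error id" wording in the IDL:
+ ///
+ /// ```swift
+ /// var request = Spark_Connect_FetchErrorDetailsRequest()
+ /// request.errorID = errorID  // identifier previously returned by the server
+ /// let details = try await client.fetchErrorDetails(request)
+ /// ```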
+ internal func fetchErrorDetails( + request: GRPCCore.ClientRequest, + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + try await self.fetchErrorDetails( + request: request, + serializer: GRPCProtobuf.ProtobufSerializer(), + deserializer: GRPCProtobuf.ProtobufDeserializer(), + options: options, + onResponse: handleResponse + ) + } +} + +// Helpers providing sugared APIs for 'ClientProtocol' methods. +extension Spark_Connect_SparkConnectService.ClientProtocol { + /// Call the "ExecutePlan" method. + /// + /// > Source IDL Documentation: + /// > + /// > Executes a request that contains the query and returns a stream of [[Response]]. + /// > + /// > It is guaranteed that there is at least one ARROW batch returned even if the result set is empty. + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func executePlan( + _ message: Spark_Connect_ExecutePlanRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse) async throws -> Result + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.executePlan( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "AnalyzePlan" method. + /// + /// > Source IDL Documentation: + /// > + /// > Analyzes a query and returns a [[AnalyzeResponse]] containing metadata about the query. + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func analyzePlan( + _ message: Spark_Connect_AnalyzePlanRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.analyzePlan( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "Config" method. + /// + /// > Source IDL Documentation: + /// > + /// > Update or fetch the configurations and returns a [[ConfigResponse]] containing the result. + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. 
+ /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func config( + _ message: Spark_Connect_ConfigRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.config( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "AddArtifacts" method. + /// + /// > Source IDL Documentation: + /// > + /// > Add artifacts to the session and returns a [[AddArtifactsResponse]] containing metadata about + /// > the added artifacts. + /// + /// - Parameters: + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - producer: A closure producing request messages to send to the server. The request + /// stream is closed when the closure returns. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func addArtifacts( + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + requestProducer producer: @Sendable @escaping (GRPCCore.RPCWriter) async throws -> Void, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.StreamingClientRequest( + metadata: metadata, + producer: producer + ) + return try await self.addArtifacts( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ArtifactStatus" method. + /// + /// > Source IDL Documentation: + /// > + /// > Check statuses of artifacts in the session and returns them in a [[ArtifactStatusesResponse]] + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func artifactStatus( + _ message: Spark_Connect_ArtifactStatusesRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.artifactStatus( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "Interrupt" method. 
+ /// + /// > Source IDL Documentation: + /// > + /// > Interrupts running executions + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func interrupt( + _ message: Spark_Connect_InterruptRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.interrupt( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReattachExecute" method. + /// + /// > Source IDL Documentation: + /// > + /// > Reattach to an existing reattachable execution. + /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true. + /// > If the ExecutePlanResponse stream ends without a ResultComplete message, there is more to + /// > continue. If there is a ResultComplete, the client should use ReleaseExecute with + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func reattachExecute( + _ message: Spark_Connect_ReattachExecuteRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.StreamingClientResponse) async throws -> Result + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.reattachExecute( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReleaseExecute" method. + /// + /// > Source IDL Documentation: + /// > + /// > Release an reattachable execution, or parts thereof. + /// > The ExecutePlan must have been started with ReattachOptions.reattachable=true. + /// > Non reattachable executions are released automatically and immediately after the ExecutePlan + /// > RPC and ReleaseExecute may not be used. + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. 
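+ ///
+ /// A minimal usage sketch; `operationID` is an assumed field name identifying
+ /// the reattachable execution to release:
+ ///
+ /// ```swift
+ /// var request = Spark_Connect_ReleaseExecuteRequest()
+ /// request.operationID = operationID  // hypothetical operation identifier
+ /// _ = try await client.releaseExecute(request)
+ /// ```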
+ internal func releaseExecute( + _ message: Spark_Connect_ReleaseExecuteRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.releaseExecute( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "ReleaseSession" method. + /// + /// > Source IDL Documentation: + /// > + /// > Release a session. + /// > All the executions in the session will be released. Any further requests for the session with + /// > that session_id for the given user_id will fail. If the session didn't exist or was already + /// > released, this is a noop. + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func releaseSession( + _ message: Spark_Connect_ReleaseSessionRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.releaseSession( + request: request, + options: options, + onResponse: handleResponse + ) + } + + /// Call the "FetchErrorDetails" method. + /// + /// > Source IDL Documentation: + /// > + /// > FetchErrorDetails retrieves the matched exception with details based on a provided error id. + /// + /// - Parameters: + /// - message: request message to send. + /// - metadata: Additional metadata to send, defaults to empty. + /// - options: Options to apply to this RPC, defaults to `.defaults`. + /// - handleResponse: A closure which handles the response, the result of which is + /// returned to the caller. Returning from the closure will cancel the RPC if it + /// hasn't already finished. + /// - Returns: The result of `handleResponse`. + internal func fetchErrorDetails( + _ message: Spark_Connect_FetchErrorDetailsRequest, + metadata: GRPCCore.Metadata = [:], + options: GRPCCore.CallOptions = .defaults, + onResponse handleResponse: @Sendable @escaping (GRPCCore.ClientResponse) async throws -> Result = { response in + try response.message + } + ) async throws -> Result where Result: Sendable { + let request = GRPCCore.ClientRequest( + message: message, + metadata: metadata + ) + return try await self.fetchErrorDetails( + request: request, + options: options, + onResponse: handleResponse + ) + } +} \ No newline at end of file diff --git a/Sources/SparkConnect/base.pb.swift b/Sources/SparkConnect/base.pb.swift new file mode 100644 index 0000000..d41a9bc --- /dev/null +++ b/Sources/SparkConnect/base.pb.swift @@ -0,0 +1,7606 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. 
+// Source: spark/connect/base.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +/// A [[Plan]] is the structure that carries the runtime information for the execution from the +/// client to the server. A [[Plan]] can either be of the type [[Relation]] which is a reference +/// to the underlying logical plan or it can be of the [[Command]] type that is used to execute +/// commands on the server. +struct Spark_Connect_Plan: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var opType: Spark_Connect_Plan.OneOf_OpType? = nil + + var root: Spark_Connect_Relation { + get { + if case .root(let v)? = opType {return v} + return Spark_Connect_Relation() + } + set {opType = .root(newValue)} + } + + var command: Spark_Connect_Command { + get { + if case .command(let v)? = opType {return v} + return Spark_Connect_Command() + } + set {opType = .command(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_OpType: Equatable, Sendable { + case root(Spark_Connect_Relation) + case command(Spark_Connect_Command) + + } + + init() {} +} + +/// User Context is used to refer to one particular user session that is executing +/// queries in the backend. +struct Spark_Connect_UserContext: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var userID: String = String() + + var userName: String = String() + + /// To extend the existing user context message that is used to identify incoming requests, + /// Spark Connect leverages the Any protobuf type that can be used to inject arbitrary other + /// messages into this message. Extensions are stored as a `repeated` type to be able to + /// handle multiple active extensions. 
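+ ///
+ /// A minimal sketch of populating a user context; `userID` and `userName` are
+ /// the fields declared above, the values are placeholders, and `extensions`
+ /// is left empty:
+ ///
+ /// ```swift
+ /// var context = Spark_Connect_UserContext()
+ /// context.userID = "spark-user"
+ /// context.userName = "Spark User"
+ /// ```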
+ var extensions: [SwiftProtobuf.Google_Protobuf_Any] = []
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+}
+
+/// Request to perform plan analysis, optionally to explain the plan.
+struct Spark_Connect_AnalyzePlanRequest: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required)
+ ///
+ /// The session_id specifies a spark session for a user id (which is specified
+ /// by user_context.user_id). The session_id is set by the client to be able to
+ /// collate streaming responses from different queries within the dedicated session.
+ /// The id should be a UUID string of the format `00112233-4455-6677-8899-aabbccddeeff`
+ var sessionID: String {
+ get {return _storage._sessionID}
+ set {_uniqueStorage()._sessionID = newValue}
+ }
+
+ /// (Optional)
+ ///
+ /// Server-side generated idempotency key from the previous responses (if any). Server
+ /// can use this to validate that the server-side session has not changed.
+ var clientObservedServerSideSessionID: String {
+ get {return _storage._clientObservedServerSideSessionID ?? String()}
+ set {_uniqueStorage()._clientObservedServerSideSessionID = newValue}
+ }
+ /// Returns true if `clientObservedServerSideSessionID` has been explicitly set.
+ var hasClientObservedServerSideSessionID: Bool {return _storage._clientObservedServerSideSessionID != nil}
+ /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value.
+ mutating func clearClientObservedServerSideSessionID() {_uniqueStorage()._clientObservedServerSideSessionID = nil}
+
+ /// (Required) User context
+ var userContext: Spark_Connect_UserContext {
+ get {return _storage._userContext ?? Spark_Connect_UserContext()}
+ set {_uniqueStorage()._userContext = newValue}
+ }
+ /// Returns true if `userContext` has been explicitly set.
+ var hasUserContext: Bool {return _storage._userContext != nil}
+ /// Clears the value of `userContext`. Subsequent reads from it will return its default value.
+ mutating func clearUserContext() {_uniqueStorage()._userContext = nil}
+
+ /// Provides optional information about the client sending the request. This field
+ /// can be used for language or version specific information and is only intended for
+ /// logging purposes and will not be interpreted by the server.
+ var clientType: String {
+ get {return _storage._clientType ?? String()}
+ set {_uniqueStorage()._clientType = newValue}
+ }
+ /// Returns true if `clientType` has been explicitly set.
+ var hasClientType: Bool {return _storage._clientType != nil}
+ /// Clears the value of `clientType`. Subsequent reads from it will return its default value.
+ mutating func clearClientType() {_uniqueStorage()._clientType = nil}
+
+ var analyze: OneOf_Analyze? {
+ get {return _storage._analyze}
+ set {_uniqueStorage()._analyze = newValue}
+ }
+
+ var schema: Spark_Connect_AnalyzePlanRequest.Schema {
+ get {
+ if case .schema(let v)? = _storage._analyze {return v}
+ return Spark_Connect_AnalyzePlanRequest.Schema()
+ }
+ set {_uniqueStorage()._analyze = .schema(newValue)}
+ }
+
+ var explain: Spark_Connect_AnalyzePlanRequest.Explain {
+ get {
+ if case .explain(let v)?
= _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.Explain() + } + set {_uniqueStorage()._analyze = .explain(newValue)} + } + + var treeString: Spark_Connect_AnalyzePlanRequest.TreeString { + get { + if case .treeString(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.TreeString() + } + set {_uniqueStorage()._analyze = .treeString(newValue)} + } + + var isLocal: Spark_Connect_AnalyzePlanRequest.IsLocal { + get { + if case .isLocal(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.IsLocal() + } + set {_uniqueStorage()._analyze = .isLocal(newValue)} + } + + var isStreaming: Spark_Connect_AnalyzePlanRequest.IsStreaming { + get { + if case .isStreaming(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.IsStreaming() + } + set {_uniqueStorage()._analyze = .isStreaming(newValue)} + } + + var inputFiles: Spark_Connect_AnalyzePlanRequest.InputFiles { + get { + if case .inputFiles(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.InputFiles() + } + set {_uniqueStorage()._analyze = .inputFiles(newValue)} + } + + var sparkVersion: Spark_Connect_AnalyzePlanRequest.SparkVersion { + get { + if case .sparkVersion(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.SparkVersion() + } + set {_uniqueStorage()._analyze = .sparkVersion(newValue)} + } + + var ddlParse: Spark_Connect_AnalyzePlanRequest.DDLParse { + get { + if case .ddlParse(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.DDLParse() + } + set {_uniqueStorage()._analyze = .ddlParse(newValue)} + } + + var sameSemantics: Spark_Connect_AnalyzePlanRequest.SameSemantics { + get { + if case .sameSemantics(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.SameSemantics() + } + set {_uniqueStorage()._analyze = .sameSemantics(newValue)} + } + + var semanticHash: Spark_Connect_AnalyzePlanRequest.SemanticHash { + get { + if case .semanticHash(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.SemanticHash() + } + set {_uniqueStorage()._analyze = .semanticHash(newValue)} + } + + var persist: Spark_Connect_AnalyzePlanRequest.Persist { + get { + if case .persist(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.Persist() + } + set {_uniqueStorage()._analyze = .persist(newValue)} + } + + var unpersist: Spark_Connect_AnalyzePlanRequest.Unpersist { + get { + if case .unpersist(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.Unpersist() + } + set {_uniqueStorage()._analyze = .unpersist(newValue)} + } + + var getStorageLevel: Spark_Connect_AnalyzePlanRequest.GetStorageLevel { + get { + if case .getStorageLevel(let v)? = _storage._analyze {return v} + return Spark_Connect_AnalyzePlanRequest.GetStorageLevel() + } + set {_uniqueStorage()._analyze = .getStorageLevel(newValue)} + } + + var jsonToDdl: Spark_Connect_AnalyzePlanRequest.JsonToDDL { + get { + if case .jsonToDdl(let v)? 
= _storage._analyze {return v}
+ return Spark_Connect_AnalyzePlanRequest.JsonToDDL()
+ }
+ set {_uniqueStorage()._analyze = .jsonToDdl(newValue)}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ enum OneOf_Analyze: Equatable, Sendable {
+ case schema(Spark_Connect_AnalyzePlanRequest.Schema)
+ case explain(Spark_Connect_AnalyzePlanRequest.Explain)
+ case treeString(Spark_Connect_AnalyzePlanRequest.TreeString)
+ case isLocal(Spark_Connect_AnalyzePlanRequest.IsLocal)
+ case isStreaming(Spark_Connect_AnalyzePlanRequest.IsStreaming)
+ case inputFiles(Spark_Connect_AnalyzePlanRequest.InputFiles)
+ case sparkVersion(Spark_Connect_AnalyzePlanRequest.SparkVersion)
+ case ddlParse(Spark_Connect_AnalyzePlanRequest.DDLParse)
+ case sameSemantics(Spark_Connect_AnalyzePlanRequest.SameSemantics)
+ case semanticHash(Spark_Connect_AnalyzePlanRequest.SemanticHash)
+ case persist(Spark_Connect_AnalyzePlanRequest.Persist)
+ case unpersist(Spark_Connect_AnalyzePlanRequest.Unpersist)
+ case getStorageLevel(Spark_Connect_AnalyzePlanRequest.GetStorageLevel)
+ case jsonToDdl(Spark_Connect_AnalyzePlanRequest.JsonToDDL)
+
+ }
+
+ struct Schema: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The logical plan to be analyzed.
+ var plan: Spark_Connect_Plan {
+ get {return _plan ?? Spark_Connect_Plan()}
+ set {_plan = newValue}
+ }
+ /// Returns true if `plan` has been explicitly set.
+ var hasPlan: Bool {return self._plan != nil}
+ /// Clears the value of `plan`. Subsequent reads from it will return its default value.
+ mutating func clearPlan() {self._plan = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _plan: Spark_Connect_Plan? = nil
+ }
+
+ /// Explains the input plan based on a configurable mode.
+ struct Explain: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The logical plan to be analyzed.
+ var plan: Spark_Connect_Plan {
+ get {return _plan ?? Spark_Connect_Plan()}
+ set {_plan = newValue}
+ }
+ /// Returns true if `plan` has been explicitly set.
+ var hasPlan: Bool {return self._plan != nil}
+ /// Clears the value of `plan`. Subsequent reads from it will return its default value.
+ mutating func clearPlan() {self._plan = nil}
+
+ /// (Required) For analyzePlan RPC calls, configures the mode used to explain the plan in strings.
+ var explainMode: Spark_Connect_AnalyzePlanRequest.Explain.ExplainMode = .unspecified
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ /// Plan explanation mode.
+ enum ExplainMode: SwiftProtobuf.Enum, Swift.CaseIterable {
+ typealias RawValue = Int
+ case unspecified // = 0
+
+ /// Generates only the physical plan.
+ case simple // = 1
+
+ /// Generates the parsed logical plan, analyzed logical plan, optimized logical plan, and physical plan.
+ /// The parsed logical plan is an unresolved plan extracted from the query. The analyzed logical plan
+ /// transforms unresolvedAttribute and unresolvedRelation nodes into fully typed objects. The optimized
+ /// logical plan is transformed through a set of optimization rules, resulting in the physical plan.
+ case extended // = 2
+
+ /// Generates code for the statement, if any, and a physical plan.
+ case codegen // = 3 + + /// If plan node statistics are available, generates a logical plan and also the statistics. + case cost // = 4 + + /// Generates a physical plan outline and also node details. + case formatted // = 5 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .simple + case 2: self = .extended + case 3: self = .codegen + case 4: self = .cost + case 5: self = .formatted + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .simple: return 1 + case .extended: return 2 + case .codegen: return 3 + case .cost: return 4 + case .formatted: return 5 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + static let allCases: [Spark_Connect_AnalyzePlanRequest.Explain.ExplainMode] = [ + .unspecified, + .simple, + .extended, + .codegen, + .cost, + .formatted, + ] + + } + + init() {} + + fileprivate var _plan: Spark_Connect_Plan? = nil + } + + struct TreeString: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to be analyzed. + var plan: Spark_Connect_Plan { + get {return _plan ?? Spark_Connect_Plan()} + set {_plan = newValue} + } + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool {return self._plan != nil} + /// Clears the value of `plan`. Subsequent reads from it will return its default value. + mutating func clearPlan() {self._plan = nil} + + /// (Optional) Max level of the schema. + var level: Int32 { + get {return _level ?? 0} + set {_level = newValue} + } + /// Returns true if `level` has been explicitly set. + var hasLevel: Bool {return self._level != nil} + /// Clears the value of `level`. Subsequent reads from it will return its default value. + mutating func clearLevel() {self._level = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _plan: Spark_Connect_Plan? = nil + fileprivate var _level: Int32? = nil + } + + struct IsLocal: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to be analyzed. + var plan: Spark_Connect_Plan { + get {return _plan ?? Spark_Connect_Plan()} + set {_plan = newValue} + } + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool {return self._plan != nil} + /// Clears the value of `plan`. Subsequent reads from it will return its default value. + mutating func clearPlan() {self._plan = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _plan: Spark_Connect_Plan? = nil + } + + struct IsStreaming: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to be analyzed. + var plan: Spark_Connect_Plan { + get {return _plan ?? Spark_Connect_Plan()} + set {_plan = newValue} + } + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool {return self._plan != nil} + /// Clears the value of `plan`. 
Subsequent reads from it will return its default value. + mutating func clearPlan() {self._plan = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _plan: Spark_Connect_Plan? = nil + } + + struct InputFiles: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to be analyzed. + var plan: Spark_Connect_Plan { + get {return _plan ?? Spark_Connect_Plan()} + set {_plan = newValue} + } + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool {return self._plan != nil} + /// Clears the value of `plan`. Subsequent reads from it will return its default value. + mutating func clearPlan() {self._plan = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _plan: Spark_Connect_Plan? = nil + } + + struct SparkVersion: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct DDLParse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The DDL formatted string to be parsed. + var ddlString: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + /// Returns `true` when the logical query plans are equal and therefore return same results. + struct SameSemantics: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The plan to be compared. + var targetPlan: Spark_Connect_Plan { + get {return _storage._targetPlan ?? Spark_Connect_Plan()} + set {_uniqueStorage()._targetPlan = newValue} + } + /// Returns true if `targetPlan` has been explicitly set. + var hasTargetPlan: Bool {return _storage._targetPlan != nil} + /// Clears the value of `targetPlan`. Subsequent reads from it will return its default value. + mutating func clearTargetPlan() {_uniqueStorage()._targetPlan = nil} + + /// (Required) The other plan to be compared. + var otherPlan: Spark_Connect_Plan { + get {return _storage._otherPlan ?? Spark_Connect_Plan()} + set {_uniqueStorage()._otherPlan = newValue} + } + /// Returns true if `otherPlan` has been explicitly set. + var hasOtherPlan: Bool {return _storage._otherPlan != nil} + /// Clears the value of `otherPlan`. Subsequent reads from it will return its default value. + mutating func clearOtherPlan() {_uniqueStorage()._otherPlan = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + struct SemanticHash: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to get a hashCode. + var plan: Spark_Connect_Plan { + get {return _plan ?? 
Spark_Connect_Plan()} + set {_plan = newValue} + } + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool {return self._plan != nil} + /// Clears the value of `plan`. Subsequent reads from it will return its default value. + mutating func clearPlan() {self._plan = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _plan: Spark_Connect_Plan? = nil + } + + struct Persist: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to persist. + var relation: Spark_Connect_Relation { + get {return _relation ?? Spark_Connect_Relation()} + set {_relation = newValue} + } + /// Returns true if `relation` has been explicitly set. + var hasRelation: Bool {return self._relation != nil} + /// Clears the value of `relation`. Subsequent reads from it will return its default value. + mutating func clearRelation() {self._relation = nil} + + /// (Optional) The storage level. + var storageLevel: Spark_Connect_StorageLevel { + get {return _storageLevel ?? Spark_Connect_StorageLevel()} + set {_storageLevel = newValue} + } + /// Returns true if `storageLevel` has been explicitly set. + var hasStorageLevel: Bool {return self._storageLevel != nil} + /// Clears the value of `storageLevel`. Subsequent reads from it will return its default value. + mutating func clearStorageLevel() {self._storageLevel = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _relation: Spark_Connect_Relation? = nil + fileprivate var _storageLevel: Spark_Connect_StorageLevel? = nil + } + + struct Unpersist: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to unpersist. + var relation: Spark_Connect_Relation { + get {return _relation ?? Spark_Connect_Relation()} + set {_relation = newValue} + } + /// Returns true if `relation` has been explicitly set. + var hasRelation: Bool {return self._relation != nil} + /// Clears the value of `relation`. Subsequent reads from it will return its default value. + mutating func clearRelation() {self._relation = nil} + + /// (Optional) Whether to block until all blocks are deleted. + var blocking: Bool { + get {return _blocking ?? false} + set {_blocking = newValue} + } + /// Returns true if `blocking` has been explicitly set. + var hasBlocking: Bool {return self._blocking != nil} + /// Clears the value of `blocking`. Subsequent reads from it will return its default value. + mutating func clearBlocking() {self._blocking = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _relation: Spark_Connect_Relation? = nil + fileprivate var _blocking: Bool? = nil + } + + struct GetStorageLevel: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The logical plan to get the storage level. + var relation: Spark_Connect_Relation { + get {return _relation ?? Spark_Connect_Relation()} + set {_relation = newValue} + } + /// Returns true if `relation` has been explicitly set. 
+ var hasRelation: Bool {return self._relation != nil} + /// Clears the value of `relation`. Subsequent reads from it will return its default value. + mutating func clearRelation() {self._relation = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _relation: Spark_Connect_Relation? = nil + } + + struct JsonToDDL: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The JSON formatted string to be converted to DDL. + var jsonString: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Response to performing analysis of the query. Contains relevant metadata to be able to +/// reason about the performance. +/// Next ID: 16 +struct Spark_Connect_AnalyzePlanResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var sessionID: String = String() + + /// Server-side generated idempotency key that the client can use to assert that the server side + /// session has not changed. + var serverSideSessionID: String = String() + + var result: Spark_Connect_AnalyzePlanResponse.OneOf_Result? = nil + + var schema: Spark_Connect_AnalyzePlanResponse.Schema { + get { + if case .schema(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.Schema() + } + set {result = .schema(newValue)} + } + + var explain: Spark_Connect_AnalyzePlanResponse.Explain { + get { + if case .explain(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.Explain() + } + set {result = .explain(newValue)} + } + + var treeString: Spark_Connect_AnalyzePlanResponse.TreeString { + get { + if case .treeString(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.TreeString() + } + set {result = .treeString(newValue)} + } + + var isLocal: Spark_Connect_AnalyzePlanResponse.IsLocal { + get { + if case .isLocal(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.IsLocal() + } + set {result = .isLocal(newValue)} + } + + var isStreaming: Spark_Connect_AnalyzePlanResponse.IsStreaming { + get { + if case .isStreaming(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.IsStreaming() + } + set {result = .isStreaming(newValue)} + } + + var inputFiles: Spark_Connect_AnalyzePlanResponse.InputFiles { + get { + if case .inputFiles(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.InputFiles() + } + set {result = .inputFiles(newValue)} + } + + var sparkVersion: Spark_Connect_AnalyzePlanResponse.SparkVersion { + get { + if case .sparkVersion(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.SparkVersion() + } + set {result = .sparkVersion(newValue)} + } + + var ddlParse: Spark_Connect_AnalyzePlanResponse.DDLParse { + get { + if case .ddlParse(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.DDLParse() + } + set {result = .ddlParse(newValue)} + } + + var sameSemantics: Spark_Connect_AnalyzePlanResponse.SameSemantics { + get { + if case .sameSemantics(let v)? 
= result {return v} + return Spark_Connect_AnalyzePlanResponse.SameSemantics() + } + set {result = .sameSemantics(newValue)} + } + + var semanticHash: Spark_Connect_AnalyzePlanResponse.SemanticHash { + get { + if case .semanticHash(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.SemanticHash() + } + set {result = .semanticHash(newValue)} + } + + var persist: Spark_Connect_AnalyzePlanResponse.Persist { + get { + if case .persist(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.Persist() + } + set {result = .persist(newValue)} + } + + var unpersist: Spark_Connect_AnalyzePlanResponse.Unpersist { + get { + if case .unpersist(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.Unpersist() + } + set {result = .unpersist(newValue)} + } + + var getStorageLevel: Spark_Connect_AnalyzePlanResponse.GetStorageLevel { + get { + if case .getStorageLevel(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.GetStorageLevel() + } + set {result = .getStorageLevel(newValue)} + } + + var jsonToDdl: Spark_Connect_AnalyzePlanResponse.JsonToDDL { + get { + if case .jsonToDdl(let v)? = result {return v} + return Spark_Connect_AnalyzePlanResponse.JsonToDDL() + } + set {result = .jsonToDdl(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_Result: Equatable, Sendable { + case schema(Spark_Connect_AnalyzePlanResponse.Schema) + case explain(Spark_Connect_AnalyzePlanResponse.Explain) + case treeString(Spark_Connect_AnalyzePlanResponse.TreeString) + case isLocal(Spark_Connect_AnalyzePlanResponse.IsLocal) + case isStreaming(Spark_Connect_AnalyzePlanResponse.IsStreaming) + case inputFiles(Spark_Connect_AnalyzePlanResponse.InputFiles) + case sparkVersion(Spark_Connect_AnalyzePlanResponse.SparkVersion) + case ddlParse(Spark_Connect_AnalyzePlanResponse.DDLParse) + case sameSemantics(Spark_Connect_AnalyzePlanResponse.SameSemantics) + case semanticHash(Spark_Connect_AnalyzePlanResponse.SemanticHash) + case persist(Spark_Connect_AnalyzePlanResponse.Persist) + case unpersist(Spark_Connect_AnalyzePlanResponse.Unpersist) + case getStorageLevel(Spark_Connect_AnalyzePlanResponse.GetStorageLevel) + case jsonToDdl(Spark_Connect_AnalyzePlanResponse.JsonToDDL) + + } + + struct Schema: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var schema: Spark_Connect_DataType { + get {return _schema ?? Spark_Connect_DataType()} + set {_schema = newValue} + } + /// Returns true if `schema` has been explicitly set. + var hasSchema: Bool {return self._schema != nil} + /// Clears the value of `schema`. Subsequent reads from it will return its default value. + mutating func clearSchema() {self._schema = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _schema: Spark_Connect_DataType? = nil + } + + struct Explain: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var explainString: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct TreeString: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var treeString: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct IsLocal: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var isLocal: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct IsStreaming: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var isStreaming: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct InputFiles: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// A best-effort snapshot of the files that compose this Dataset + var files: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct SparkVersion: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var version: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct DDLParse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var parsed: Spark_Connect_DataType { + get {return _parsed ?? Spark_Connect_DataType()} + set {_parsed = newValue} + } + /// Returns true if `parsed` has been explicitly set. + var hasParsed: Bool {return self._parsed != nil} + /// Clears the value of `parsed`. Subsequent reads from it will return its default value. + mutating func clearParsed() {self._parsed = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _parsed: Spark_Connect_DataType? = nil + } + + struct SameSemantics: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var result: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct SemanticHash: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var result: Int32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Persist: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Unpersist: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ struct GetStorageLevel: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The StorageLevel as a result of get_storage_level request.
+ var storageLevel: Spark_Connect_StorageLevel {
+ get {return _storageLevel ?? Spark_Connect_StorageLevel()}
+ set {_storageLevel = newValue}
+ }
+ /// Returns true if `storageLevel` has been explicitly set.
+ var hasStorageLevel: Bool {return self._storageLevel != nil}
+ /// Clears the value of `storageLevel`. Subsequent reads from it will return its default value.
+ mutating func clearStorageLevel() {self._storageLevel = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storageLevel: Spark_Connect_StorageLevel? = nil
+ }
+
+ struct JsonToDDL: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var ddlString: String = String()
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ init() {}
+ }
+
+ /// A request to be executed by the service.
+ struct Spark_Connect_ExecutePlanRequest: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required)
+ ///
+ /// The session_id specifies a spark session for a user id (which is specified
+ /// by user_context.user_id). The session_id is set by the client to be able to
+ /// collate streaming responses from different queries within the dedicated session.
+ /// The id should be an UUID string of the format `00112233-4455-6677-8899-aabbccddeeff`
+ var sessionID: String {
+ get {return _storage._sessionID}
+ set {_uniqueStorage()._sessionID = newValue}
+ }
+
+ /// (Optional)
+ ///
+ /// Server-side generated idempotency key from the previous responses (if any). Server
+ /// can use this to validate that the server side session has not changed.
+ var clientObservedServerSideSessionID: String {
+ get {return _storage._clientObservedServerSideSessionID ?? String()}
+ set {_uniqueStorage()._clientObservedServerSideSessionID = newValue}
+ }
+ /// Returns true if `clientObservedServerSideSessionID` has been explicitly set.
+ var hasClientObservedServerSideSessionID: Bool {return _storage._clientObservedServerSideSessionID != nil}
+ /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value.
+ mutating func clearClientObservedServerSideSessionID() {_uniqueStorage()._clientObservedServerSideSessionID = nil}
+
+ /// (Required) User context
+ ///
+ /// user_context.user_id and session_id both identify a unique remote spark session on the
+ /// server side.
+ var userContext: Spark_Connect_UserContext {
+ get {return _storage._userContext ?? Spark_Connect_UserContext()}
+ set {_uniqueStorage()._userContext = newValue}
+ }
+ /// Returns true if `userContext` has been explicitly set.
+ var hasUserContext: Bool {return _storage._userContext != nil}
+ /// Clears the value of `userContext`.
Subsequent reads from it will return its default value. + mutating func clearUserContext() {_uniqueStorage()._userContext = nil} + + /// (Optional) + /// Provide an id for this request. If not provided, it will be generated by the server. + /// It is returned in every ExecutePlanResponse.operation_id of the ExecutePlan response stream. + /// The id must be an UUID string of the format `00112233-4455-6677-8899-aabbccddeeff` + var operationID: String { + get {return _storage._operationID ?? String()} + set {_uniqueStorage()._operationID = newValue} + } + /// Returns true if `operationID` has been explicitly set. + var hasOperationID: Bool {return _storage._operationID != nil} + /// Clears the value of `operationID`. Subsequent reads from it will return its default value. + mutating func clearOperationID() {_uniqueStorage()._operationID = nil} + + /// (Required) The logical plan to be executed / analyzed. + var plan: Spark_Connect_Plan { + get {return _storage._plan ?? Spark_Connect_Plan()} + set {_uniqueStorage()._plan = newValue} + } + /// Returns true if `plan` has been explicitly set. + var hasPlan: Bool {return _storage._plan != nil} + /// Clears the value of `plan`. Subsequent reads from it will return its default value. + mutating func clearPlan() {_uniqueStorage()._plan = nil} + + /// Provides optional information about the client sending the request. This field + /// can be used for language or version specific information and is only intended for + /// logging purposes and will not be interpreted by the server. + var clientType: String { + get {return _storage._clientType ?? String()} + set {_uniqueStorage()._clientType = newValue} + } + /// Returns true if `clientType` has been explicitly set. + var hasClientType: Bool {return _storage._clientType != nil} + /// Clears the value of `clientType`. Subsequent reads from it will return its default value. + mutating func clearClientType() {_uniqueStorage()._clientType = nil} + + /// Repeated element for options that can be passed to the request. This element is currently + /// unused but allows to pass in an extension value used for arbitrary options. + var requestOptions: [Spark_Connect_ExecutePlanRequest.RequestOption] { + get {return _storage._requestOptions} + set {_uniqueStorage()._requestOptions = newValue} + } + + /// Tags to tag the given execution with. + /// Tags cannot contain ',' character and cannot be empty strings. + /// Used by Interrupt with interrupt.tag. + var tags: [String] { + get {return _storage._tags} + set {_uniqueStorage()._tags = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + struct RequestOption: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var requestOption: Spark_Connect_ExecutePlanRequest.RequestOption.OneOf_RequestOption? = nil + + var reattachOptions: Spark_Connect_ReattachOptions { + get { + if case .reattachOptions(let v)? = requestOption {return v} + return Spark_Connect_ReattachOptions() + } + set {requestOption = .reattachOptions(newValue)} + } + + /// Extension type for request options + var `extension`: SwiftProtobuf.Google_Protobuf_Any { + get { + if case .extension(let v)? 
= requestOption {return v}
+ return SwiftProtobuf.Google_Protobuf_Any()
+ }
+ set {requestOption = .extension(newValue)}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ enum OneOf_RequestOption: Equatable, Sendable {
+ case reattachOptions(Spark_Connect_ReattachOptions)
+ /// Extension type for request options
+ case `extension`(SwiftProtobuf.Google_Protobuf_Any)
+
+ }
+
+ init() {}
+ }
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+ }
+
+ /// The response of a query; can be one or more for each request. Responses belonging to the
+ /// same input query carry the same `session_id`.
+ /// Next ID: 17
+ struct Spark_Connect_ExecutePlanResponse: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var sessionID: String = String()
+
+ /// Server-side generated idempotency key that the client can use to assert that the server side
+ /// session has not changed.
+ var serverSideSessionID: String = String()
+
+ /// Identifies the ExecutePlan execution.
+ /// If set by the client in ExecutePlanRequest.operationId, that value is returned.
+ /// Otherwise generated by the server.
+ /// It is an UUID string of the format `00112233-4455-6677-8899-aabbccddeeff`
+ var operationID: String = String()
+
+ /// Identifies the response in the stream.
+ /// The id is an UUID string of the format `00112233-4455-6677-8899-aabbccddeeff`
+ var responseID: String = String()
+
+ /// Union type for the different response messages.
+ var responseType: Spark_Connect_ExecutePlanResponse.OneOf_ResponseType? = nil
+
+ var arrowBatch: Spark_Connect_ExecutePlanResponse.ArrowBatch {
+ get {
+ if case .arrowBatch(let v)? = responseType {return v}
+ return Spark_Connect_ExecutePlanResponse.ArrowBatch()
+ }
+ set {responseType = .arrowBatch(newValue)}
+ }
+
+ /// Special case for executing SQL commands.
+ var sqlCommandResult: Spark_Connect_ExecutePlanResponse.SqlCommandResult {
+ get {
+ if case .sqlCommandResult(let v)? = responseType {return v}
+ return Spark_Connect_ExecutePlanResponse.SqlCommandResult()
+ }
+ set {responseType = .sqlCommandResult(newValue)}
+ }
+
+ /// Response for a streaming query.
+ var writeStreamOperationStartResult: Spark_Connect_WriteStreamOperationStartResult {
+ get {
+ if case .writeStreamOperationStartResult(let v)? = responseType {return v}
+ return Spark_Connect_WriteStreamOperationStartResult()
+ }
+ set {responseType = .writeStreamOperationStartResult(newValue)}
+ }
+
+ /// Response for commands on a streaming query.
+ var streamingQueryCommandResult: Spark_Connect_StreamingQueryCommandResult {
+ get {
+ if case .streamingQueryCommandResult(let v)? = responseType {return v}
+ return Spark_Connect_StreamingQueryCommandResult()
+ }
+ set {responseType = .streamingQueryCommandResult(newValue)}
+ }
+
+ /// Response for 'SparkContext.resources'.
+ var getResourcesCommandResult: Spark_Connect_GetResourcesCommandResult {
+ get {
+ if case .getResourcesCommandResult(let v)? = responseType {return v}
+ return Spark_Connect_GetResourcesCommandResult()
+ }
+ set {responseType = .getResourcesCommandResult(newValue)}
+ }
+
+ /// Response for commands on the streaming query manager.
+ var streamingQueryManagerCommandResult: Spark_Connect_StreamingQueryManagerCommandResult {
+ get {
+ if case .streamingQueryManagerCommandResult(let v)?
= responseType {return v} + return Spark_Connect_StreamingQueryManagerCommandResult() + } + set {responseType = .streamingQueryManagerCommandResult(newValue)} + } + + /// Response for commands on the client side streaming query listener. + var streamingQueryListenerEventsResult: Spark_Connect_StreamingQueryListenerEventsResult { + get { + if case .streamingQueryListenerEventsResult(let v)? = responseType {return v} + return Spark_Connect_StreamingQueryListenerEventsResult() + } + set {responseType = .streamingQueryListenerEventsResult(newValue)} + } + + /// Response type informing if the stream is complete in reattachable execution. + var resultComplete: Spark_Connect_ExecutePlanResponse.ResultComplete { + get { + if case .resultComplete(let v)? = responseType {return v} + return Spark_Connect_ExecutePlanResponse.ResultComplete() + } + set {responseType = .resultComplete(newValue)} + } + + /// Response for command that creates ResourceProfile. + var createResourceProfileCommandResult: Spark_Connect_CreateResourceProfileCommandResult { + get { + if case .createResourceProfileCommandResult(let v)? = responseType {return v} + return Spark_Connect_CreateResourceProfileCommandResult() + } + set {responseType = .createResourceProfileCommandResult(newValue)} + } + + /// (Optional) Intermediate query progress reports. + var executionProgress: Spark_Connect_ExecutePlanResponse.ExecutionProgress { + get { + if case .executionProgress(let v)? = responseType {return v} + return Spark_Connect_ExecutePlanResponse.ExecutionProgress() + } + set {responseType = .executionProgress(newValue)} + } + + /// Response for command that checkpoints a DataFrame. + var checkpointCommandResult: Spark_Connect_CheckpointCommandResult { + get { + if case .checkpointCommandResult(let v)? = responseType {return v} + return Spark_Connect_CheckpointCommandResult() + } + set {responseType = .checkpointCommandResult(newValue)} + } + + /// ML command response + var mlCommandResult: Spark_Connect_MlCommandResult { + get { + if case .mlCommandResult(let v)? = responseType {return v} + return Spark_Connect_MlCommandResult() + } + set {responseType = .mlCommandResult(newValue)} + } + + /// Support arbitrary result objects. + var `extension`: SwiftProtobuf.Google_Protobuf_Any { + get { + if case .extension(let v)? = responseType {return v} + return SwiftProtobuf.Google_Protobuf_Any() + } + set {responseType = .extension(newValue)} + } + + /// Metrics for the query execution. Typically, this field is only present in the last + /// batch of results and then represent the overall state of the query execution. + var metrics: Spark_Connect_ExecutePlanResponse.Metrics { + get {return _metrics ?? Spark_Connect_ExecutePlanResponse.Metrics()} + set {_metrics = newValue} + } + /// Returns true if `metrics` has been explicitly set. + var hasMetrics: Bool {return self._metrics != nil} + /// Clears the value of `metrics`. Subsequent reads from it will return its default value. + mutating func clearMetrics() {self._metrics = nil} + + /// The metrics observed during the execution of the query plan. + var observedMetrics: [Spark_Connect_ExecutePlanResponse.ObservedMetrics] = [] + + /// (Optional) The Spark schema. This field is available when `collect` is called. + var schema: Spark_Connect_DataType { + get {return _schema ?? Spark_Connect_DataType()} + set {_schema = newValue} + } + /// Returns true if `schema` has been explicitly set. + var hasSchema: Bool {return self._schema != nil} + /// Clears the value of `schema`. 
Subsequent reads from it will return its default value. + mutating func clearSchema() {self._schema = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// Union type for the different response messages. + enum OneOf_ResponseType: Equatable, Sendable { + case arrowBatch(Spark_Connect_ExecutePlanResponse.ArrowBatch) + /// Special case for executing SQL commands. + case sqlCommandResult(Spark_Connect_ExecutePlanResponse.SqlCommandResult) + /// Response for a streaming query. + case writeStreamOperationStartResult(Spark_Connect_WriteStreamOperationStartResult) + /// Response for commands on a streaming query. + case streamingQueryCommandResult(Spark_Connect_StreamingQueryCommandResult) + /// Response for 'SparkContext.resources'. + case getResourcesCommandResult(Spark_Connect_GetResourcesCommandResult) + /// Response for commands on the streaming query manager. + case streamingQueryManagerCommandResult(Spark_Connect_StreamingQueryManagerCommandResult) + /// Response for commands on the client side streaming query listener. + case streamingQueryListenerEventsResult(Spark_Connect_StreamingQueryListenerEventsResult) + /// Response type informing if the stream is complete in reattachable execution. + case resultComplete(Spark_Connect_ExecutePlanResponse.ResultComplete) + /// Response for command that creates ResourceProfile. + case createResourceProfileCommandResult(Spark_Connect_CreateResourceProfileCommandResult) + /// (Optional) Intermediate query progress reports. + case executionProgress(Spark_Connect_ExecutePlanResponse.ExecutionProgress) + /// Response for command that checkpoints a DataFrame. + case checkpointCommandResult(Spark_Connect_CheckpointCommandResult) + /// ML command response + case mlCommandResult(Spark_Connect_MlCommandResult) + /// Support arbitrary result objects. + case `extension`(SwiftProtobuf.Google_Protobuf_Any) + + } + + /// A SQL command returns an opaque Relation that can be directly used as input for the next + /// call. + struct SqlCommandResult: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var relation: Spark_Connect_Relation { + get {return _relation ?? Spark_Connect_Relation()} + set {_relation = newValue} + } + /// Returns true if `relation` has been explicitly set. + var hasRelation: Bool {return self._relation != nil} + /// Clears the value of `relation`. Subsequent reads from it will return its default value. + mutating func clearRelation() {self._relation = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _relation: Spark_Connect_Relation? = nil + } + + /// Batch results of metrics. + struct ArrowBatch: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// Count rows in `data`. Must match the number of rows inside `data`. + var rowCount: Int64 = 0 + + /// Serialized Arrow data. + var data: Data = Data() + + /// If set, row offset of the start of this ArrowBatch in execution results. + var startOffset: Int64 { + get {return _startOffset ?? 0} + set {_startOffset = newValue} + } + /// Returns true if `startOffset` has been explicitly set. + var hasStartOffset: Bool {return self._startOffset != nil} + /// Clears the value of `startOffset`. 
Subsequent reads from it will return its default value.
+ mutating func clearStartOffset() {self._startOffset = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _startOffset: Int64? = nil
+ }
+
+ struct Metrics: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var metrics: [Spark_Connect_ExecutePlanResponse.Metrics.MetricObject] = []
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ struct MetricObject: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var name: String = String()
+
+ var planID: Int64 = 0
+
+ var parent: Int64 = 0
+
+ var executionMetrics: Dictionary<String,Spark_Connect_ExecutePlanResponse.Metrics.MetricValue> = [:]
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ struct MetricValue: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var name: String = String()
+
+ var value: Int64 = 0
+
+ var metricType: String = String()
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ init() {}
+ }
+
+ struct ObservedMetrics: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var name: String = String()
+
+ var values: [Spark_Connect_Expression.Literal] = []
+
+ var keys: [String] = []
+
+ var planID: Int64 = 0
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ /// If present, in a reattachable execution this means that after server sends onComplete,
+ /// the execution is complete. If the server sends onComplete without sending a ResultComplete,
+ /// it means that there is more, and the client should use ReattachExecute RPC to continue.
+ struct ResultComplete: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ /// This message is used to communicate the progress of the query during execution.
+ struct ExecutionProgress: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// Captures the progress of each individual stage.
+ var stages: [Spark_Connect_ExecutePlanResponse.ExecutionProgress.StageInfo] = []
+
+ /// Captures the currently in-progress tasks.
+ var numInflightTasks: Int64 = 0
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ struct StageInfo: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
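+
+ // A minimal sketch of how a client might read these progress reports off
+ // the ExecutePlan response stream, assuming `responses` is an AsyncSequence
+ // of Spark_Connect_ExecutePlanResponse values obtained from the generated
+ // gRPC client (the name is illustrative, not part of this file):
+ //
+ //   for try await response in responses {
+ //     if case .executionProgress(let progress)? = response.responseType {
+ //       let finished = progress.stages.filter { $0.done }.count
+ //       print("stages \(finished)/\(progress.stages.count), in-flight tasks: \(progress.numInflightTasks)")
+ //     }
+ //   }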
+ + var stageID: Int64 = 0 + + var numTasks: Int64 = 0 + + var numCompletedTasks: Int64 = 0 + + var inputBytesRead: Int64 = 0 + + var done: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} + } + + init() {} + + fileprivate var _metrics: Spark_Connect_ExecutePlanResponse.Metrics? = nil + fileprivate var _schema: Spark_Connect_DataType? = nil +} + +/// The key-value pair for the config request and response. +struct Spark_Connect_KeyValue: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The key. + var key: String = String() + + /// (Optional) The value. + var value: String { + get {return _value ?? String()} + set {_value = newValue} + } + /// Returns true if `value` has been explicitly set. + var hasValue: Bool {return self._value != nil} + /// Clears the value of `value`. Subsequent reads from it will return its default value. + mutating func clearValue() {self._value = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _value: String? = nil +} + +/// Request to update or fetch the configurations. +struct Spark_Connect_ConfigRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + /// + /// The session_id specifies a spark session for a user id (which is specified + /// by user_context.user_id). The session_id is set by the client to be able to + /// collate streaming responses from different queries within the dedicated session. + /// The id should be an UUID string of the format `00112233-4455-6677-8899-aabbccddeeff` + var sessionID: String = String() + + /// (Optional) + /// + /// Server-side generated idempotency key from the previous responses (if any). Server + /// can use this to validate that the server side session has not changed. + var clientObservedServerSideSessionID: String { + get {return _clientObservedServerSideSessionID ?? String()} + set {_clientObservedServerSideSessionID = newValue} + } + /// Returns true if `clientObservedServerSideSessionID` has been explicitly set. + var hasClientObservedServerSideSessionID: Bool {return self._clientObservedServerSideSessionID != nil} + /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value. + mutating func clearClientObservedServerSideSessionID() {self._clientObservedServerSideSessionID = nil} + + /// (Required) User context + var userContext: Spark_Connect_UserContext { + get {return _userContext ?? Spark_Connect_UserContext()} + set {_userContext = newValue} + } + /// Returns true if `userContext` has been explicitly set. + var hasUserContext: Bool {return self._userContext != nil} + /// Clears the value of `userContext`. Subsequent reads from it will return its default value. + mutating func clearUserContext() {self._userContext = nil} + + /// (Required) The operation for the config. + var operation: Spark_Connect_ConfigRequest.Operation { + get {return _operation ?? Spark_Connect_ConfigRequest.Operation()} + set {_operation = newValue} + } + /// Returns true if `operation` has been explicitly set. + var hasOperation: Bool {return self._operation != nil} + /// Clears the value of `operation`. 
Subsequent reads from it will return its default value. + mutating func clearOperation() {self._operation = nil} + + /// Provides optional information about the client sending the request. This field + /// can be used for language or version specific information and is only intended for + /// logging purposes and will not be interpreted by the server. + var clientType: String { + get {return _clientType ?? String()} + set {_clientType = newValue} + } + /// Returns true if `clientType` has been explicitly set. + var hasClientType: Bool {return self._clientType != nil} + /// Clears the value of `clientType`. Subsequent reads from it will return its default value. + mutating func clearClientType() {self._clientType = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + struct Operation: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var opType: Spark_Connect_ConfigRequest.Operation.OneOf_OpType? = nil + + var set: Spark_Connect_ConfigRequest.Set { + get { + if case .set(let v)? = opType {return v} + return Spark_Connect_ConfigRequest.Set() + } + set {opType = .set(newValue)} + } + + var get: Spark_Connect_ConfigRequest.Get { + get { + if case .get(let v)? = opType {return v} + return Spark_Connect_ConfigRequest.Get() + } + set {opType = .get(newValue)} + } + + var getWithDefault: Spark_Connect_ConfigRequest.GetWithDefault { + get { + if case .getWithDefault(let v)? = opType {return v} + return Spark_Connect_ConfigRequest.GetWithDefault() + } + set {opType = .getWithDefault(newValue)} + } + + var getOption: Spark_Connect_ConfigRequest.GetOption { + get { + if case .getOption(let v)? = opType {return v} + return Spark_Connect_ConfigRequest.GetOption() + } + set {opType = .getOption(newValue)} + } + + var getAll: Spark_Connect_ConfigRequest.GetAll { + get { + if case .getAll(let v)? = opType {return v} + return Spark_Connect_ConfigRequest.GetAll() + } + set {opType = .getAll(newValue)} + } + + var unset: Spark_Connect_ConfigRequest.Unset { + get { + if case .unset(let v)? = opType {return v} + return Spark_Connect_ConfigRequest.Unset() + } + set {opType = .unset(newValue)} + } + + var isModifiable: Spark_Connect_ConfigRequest.IsModifiable { + get { + if case .isModifiable(let v)? = opType {return v} + return Spark_Connect_ConfigRequest.IsModifiable() + } + set {opType = .isModifiable(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_OpType: Equatable, Sendable { + case set(Spark_Connect_ConfigRequest.Set) + case get(Spark_Connect_ConfigRequest.Get) + case getWithDefault(Spark_Connect_ConfigRequest.GetWithDefault) + case getOption(Spark_Connect_ConfigRequest.GetOption) + case getAll(Spark_Connect_ConfigRequest.GetAll) + case unset(Spark_Connect_ConfigRequest.Unset) + case isModifiable(Spark_Connect_ConfigRequest.IsModifiable) + + } + + init() {} + } + + struct Set: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The config key-value pairs to set. + var pairs: [Spark_Connect_KeyValue] = [] + + /// (Optional) Whether to ignore failures. + var silent: Bool { + get {return _silent ?? false} + set {_silent = newValue} + } + /// Returns true if `silent` has been explicitly set. 
+ var hasSilent: Bool {return self._silent != nil} + /// Clears the value of `silent`. Subsequent reads from it will return its default value. + mutating func clearSilent() {self._silent = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _silent: Bool? = nil + } + + struct Get: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The config keys to get. + var keys: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct GetWithDefault: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The config key-value pairs to get. The value will be used as the default value. + var pairs: [Spark_Connect_KeyValue] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct GetOption: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The config keys to get optionally. + var keys: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct GetAll: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) The prefix of the config key to get. + var prefix: String { + get {return _prefix ?? String()} + set {_prefix = newValue} + } + /// Returns true if `prefix` has been explicitly set. + var hasPrefix: Bool {return self._prefix != nil} + /// Clears the value of `prefix`. Subsequent reads from it will return its default value. + mutating func clearPrefix() {self._prefix = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _prefix: String? = nil + } + + struct Unset: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The config keys to unset. + var keys: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct IsModifiable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The config keys to check the config is modifiable. + var keys: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} + + fileprivate var _clientObservedServerSideSessionID: String? = nil + fileprivate var _userContext: Spark_Connect_UserContext? = nil + fileprivate var _operation: Spark_Connect_ConfigRequest.Operation? = nil + fileprivate var _clientType: String? = nil +} + +/// Response to the config request. +/// Next ID: 5 +struct Spark_Connect_ConfigResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var sessionID: String = String() + + /// Server-side generated idempotency key that the client can use to assert that the server side + /// session has not changed. + var serverSideSessionID: String = String() + + /// (Optional) The result key-value pairs. + /// + /// Available when the operation is 'Get', 'GetWithDefault', 'GetOption', 'GetAll'. + /// Also available for the operation 'IsModifiable' with boolean string "true" and "false". + var pairs: [Spark_Connect_KeyValue] = [] + + /// (Optional) + /// + /// Warning messages for deprecated or unsupported configurations. + var warnings: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// Request to transfer client-local artifacts. +struct Spark_Connect_AddArtifactsRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + /// + /// The session_id specifies a spark session for a user id (which is specified + /// by user_context.user_id). The session_id is set by the client to be able to + /// collate streaming responses from different queries within the dedicated session. + /// The id should be an UUID string of the format `00112233-4455-6677-8899-aabbccddeeff` + var sessionID: String = String() + + /// User context + var userContext: Spark_Connect_UserContext { + get {return _userContext ?? Spark_Connect_UserContext()} + set {_userContext = newValue} + } + /// Returns true if `userContext` has been explicitly set. + var hasUserContext: Bool {return self._userContext != nil} + /// Clears the value of `userContext`. Subsequent reads from it will return its default value. + mutating func clearUserContext() {self._userContext = nil} + + /// (Optional) + /// + /// Server-side generated idempotency key from the previous responses (if any). Server + /// can use this to validate that the server side session has not changed. + var clientObservedServerSideSessionID: String { + get {return _clientObservedServerSideSessionID ?? String()} + set {_clientObservedServerSideSessionID = newValue} + } + /// Returns true if `clientObservedServerSideSessionID` has been explicitly set. + var hasClientObservedServerSideSessionID: Bool {return self._clientObservedServerSideSessionID != nil} + /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value. + mutating func clearClientObservedServerSideSessionID() {self._clientObservedServerSideSessionID = nil} + + /// Provides optional information about the client sending the request. This field + /// can be used for language or version specific information and is only intended for + /// logging purposes and will not be interpreted by the server. + var clientType: String { + get {return _clientType ?? String()} + set {_clientType = newValue} + } + /// Returns true if `clientType` has been explicitly set. + var hasClientType: Bool {return self._clientType != nil} + /// Clears the value of `clientType`. Subsequent reads from it will return its default value. + mutating func clearClientType() {self._clientType = nil} + + /// The payload is either a batch of artifacts or a partial chunk of a large artifact. + var payload: Spark_Connect_AddArtifactsRequest.OneOf_Payload? 
= nil + + var batch: Spark_Connect_AddArtifactsRequest.Batch { + get { + if case .batch(let v)? = payload {return v} + return Spark_Connect_AddArtifactsRequest.Batch() + } + set {payload = .batch(newValue)} + } + + /// The metadata and the initial chunk of a large artifact chunked into multiple requests. + /// The server side is notified about the total size of the large artifact as well as the + /// number of chunks to expect. + var beginChunk: Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact { + get { + if case .beginChunk(let v)? = payload {return v} + return Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact() + } + set {payload = .beginChunk(newValue)} + } + + /// A chunk of an artifact excluding metadata. This can be any chunk of a large artifact + /// excluding the first chunk (which is included in `BeginChunkedArtifact`). + var chunk: Spark_Connect_AddArtifactsRequest.ArtifactChunk { + get { + if case .chunk(let v)? = payload {return v} + return Spark_Connect_AddArtifactsRequest.ArtifactChunk() + } + set {payload = .chunk(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// The payload is either a batch of artifacts or a partial chunk of a large artifact. + enum OneOf_Payload: Equatable, Sendable { + case batch(Spark_Connect_AddArtifactsRequest.Batch) + /// The metadata and the initial chunk of a large artifact chunked into multiple requests. + /// The server side is notified about the total size of the large artifact as well as the + /// number of chunks to expect. + case beginChunk(Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact) + /// A chunk of an artifact excluding metadata. This can be any chunk of a large artifact + /// excluding the first chunk (which is included in `BeginChunkedArtifact`). + case chunk(Spark_Connect_AddArtifactsRequest.ArtifactChunk) + + } + + /// A chunk of an Artifact. + struct ArtifactChunk: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// Data chunk. + var data: Data = Data() + + /// CRC to allow server to verify integrity of the chunk. + var crc: Int64 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + /// An artifact that is contained in a single `ArtifactChunk`. + /// Generally, this message represents tiny artifacts such as REPL-generated class files. + struct SingleChunkArtifact: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// The name of the artifact is expected in the form of a "Relative Path" that is made up of a + /// sequence of directories and the final file element. + /// Examples of "Relative Path"s: "jars/test.jar", "classes/xyz.class", "abc.xyz", "a/b/X.jar". + /// The server is expected to maintain the hierarchy of files as defined by their name. (i.e + /// The relative path of the file on the server's filesystem will be the same as the name of + /// the provided artifact) + var name: String = String() + + /// A single data chunk. + var data: Spark_Connect_AddArtifactsRequest.ArtifactChunk { + get {return _data ?? Spark_Connect_AddArtifactsRequest.ArtifactChunk()} + set {_data = newValue} + } + /// Returns true if `data` has been explicitly set. + var hasData: Bool {return self._data != nil} + /// Clears the value of `data`. 
Subsequent reads from it will return its default value. + mutating func clearData() {self._data = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _data: Spark_Connect_AddArtifactsRequest.ArtifactChunk? = nil + } + + /// A number of `SingleChunkArtifact` batched into a single RPC. + struct Batch: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var artifacts: [Spark_Connect_AddArtifactsRequest.SingleChunkArtifact] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + /// Signals the beginning/start of a chunked artifact. + /// A large artifact is transferred through a payload of `BeginChunkedArtifact` followed by a + /// sequence of `ArtifactChunk`s. + struct BeginChunkedArtifact: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// Name of the artifact undergoing chunking. Follows the same conventions as the `name` in + /// the `Artifact` message. + var name: String = String() + + /// Total size of the artifact in bytes. + var totalBytes: Int64 = 0 + + /// Number of chunks the artifact is split into. + /// This includes the `initial_chunk`. + var numChunks: Int64 = 0 + + /// The first/initial chunk. + var initialChunk: Spark_Connect_AddArtifactsRequest.ArtifactChunk { + get {return _initialChunk ?? Spark_Connect_AddArtifactsRequest.ArtifactChunk()} + set {_initialChunk = newValue} + } + /// Returns true if `initialChunk` has been explicitly set. + var hasInitialChunk: Bool {return self._initialChunk != nil} + /// Clears the value of `initialChunk`. Subsequent reads from it will return its default value. + mutating func clearInitialChunk() {self._initialChunk = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _initialChunk: Spark_Connect_AddArtifactsRequest.ArtifactChunk? = nil + } + + init() {} + + fileprivate var _userContext: Spark_Connect_UserContext? = nil + fileprivate var _clientObservedServerSideSessionID: String? = nil + fileprivate var _clientType: String? = nil +} + +/// Response to adding an artifact. Contains relevant metadata to verify successful transfer of +/// artifact(s). +/// Next ID: 4 +struct Spark_Connect_AddArtifactsResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// Session id in which the AddArtifact was running. + var sessionID: String = String() + + /// Server-side generated idempotency key that the client can use to assert that the server side + /// session has not changed. + var serverSideSessionID: String = String() + + /// The list of artifact(s) seen by the server. + var artifacts: [Spark_Connect_AddArtifactsResponse.ArtifactSummary] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// Metadata of an artifact. + struct ArtifactSummary: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
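+
+ // A minimal sketch of how a large artifact could be split across the
+ // request stream described above, assuming `payload` (the full artifact
+ // bytes as Data), `chunkSize`, `sessionID`, and a client-side `crc32(_:)`
+ // checksum helper (all illustrative, not part of this file):
+ //
+ //   var begin = Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact()
+ //   begin.name = "jars/test.jar"
+ //   begin.totalBytes = Int64(payload.count)
+ //   begin.numChunks = Int64((payload.count + chunkSize - 1) / chunkSize)
+ //   var initial = Spark_Connect_AddArtifactsRequest.ArtifactChunk()
+ //   initial.data = payload.prefix(chunkSize)
+ //   initial.crc = crc32(initial.data)
+ //   begin.initialChunk = initial
+ //   var request = Spark_Connect_AddArtifactsRequest()
+ //   request.sessionID = sessionID
+ //   request.payload = .beginChunk(begin)
+ //   // Remaining chunks follow as `.chunk(...)` payloads on the same stream;
+ //   // `isCrcSuccessful` below reports per-artifact verification.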
+
+    var name: String = String()
+
+    /// Whether the CRC (Cyclic Redundancy Check) is successful on server verification.
+    /// The server discards any artifact that fails the CRC.
+    /// If false, the client may choose to resend the artifact specified by `name`.
+    var isCrcSuccessful: Bool = false
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  init() {}
+}
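+
+// Example (illustrative sketch, not generator output): streaming a large artifact as a
+// `BeginChunkedArtifact` followed by bare `ArtifactChunk`s, using the `payload` oneof
+// described above. The `crc32` function and the chunk size are assumed to be supplied by
+// the caller; session and user fields are omitted for brevity.
+fileprivate func exampleChunkedUploadRequests(
+  name: String, data: Data, chunkSize: Int, crc32: (Data) -> Int64
+) -> [Spark_Connect_AddArtifactsRequest] {
+  var requests: [Spark_Connect_AddArtifactsRequest] = []
+  let chunks = stride(from: 0, to: data.count, by: chunkSize).map {
+    data.subdata(in: $0..<min($0 + chunkSize, data.count))
+  }
+  for (i, chunkData) in chunks.enumerated() {
+    var chunk = Spark_Connect_AddArtifactsRequest.ArtifactChunk()
+    chunk.data = chunkData
+    chunk.crc = crc32(chunkData)
+    var request = Spark_Connect_AddArtifactsRequest()
+    if i == 0 {
+      // The first request notifies the server of the total size and chunk count.
+      var begin = Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact()
+      begin.name = name  // e.g. "jars/test.jar"
+      begin.totalBytes = Int64(data.count)
+      begin.numChunks = Int64(chunks.count)
+      begin.initialChunk = chunk
+      request.beginChunk = begin
+    } else {
+      // Subsequent requests carry the remaining chunks without metadata.
+      request.chunk = chunk
+    }
+    requests.append(request)
+  }
+  return requests
+}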
+
+/// Request to get current statuses of artifacts at the server side.
+struct Spark_Connect_ArtifactStatusesRequest: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required)
+  ///
+  /// The session_id specifies a spark session for a user id (which is specified
+  /// by user_context.user_id). The session_id is set by the client to be able to
+  /// collate streaming responses from different queries within the dedicated session.
+  /// The id should be a UUID string of the format `00112233-4455-6677-8899-aabbccddeeff`
+  var sessionID: String = String()
+
+  /// (Optional)
+  ///
+  /// Server-side generated idempotency key from the previous responses (if any). Server
+  /// can use this to validate that the server side session has not changed.
+  var clientObservedServerSideSessionID: String {
+    get {return _clientObservedServerSideSessionID ?? String()}
+    set {_clientObservedServerSideSessionID = newValue}
+  }
+  /// Returns true if `clientObservedServerSideSessionID` has been explicitly set.
+  var hasClientObservedServerSideSessionID: Bool {return self._clientObservedServerSideSessionID != nil}
+  /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value.
+  mutating func clearClientObservedServerSideSessionID() {self._clientObservedServerSideSessionID = nil}
+
+  /// User context
+  var userContext: Spark_Connect_UserContext {
+    get {return _userContext ?? Spark_Connect_UserContext()}
+    set {_userContext = newValue}
+  }
+  /// Returns true if `userContext` has been explicitly set.
+  var hasUserContext: Bool {return self._userContext != nil}
+  /// Clears the value of `userContext`. Subsequent reads from it will return its default value.
+  mutating func clearUserContext() {self._userContext = nil}
+
+  /// Provides optional information about the client sending the request. This field
+  /// can be used for language or version specific information and is only intended for
+  /// logging purposes and will not be interpreted by the server.
+  var clientType: String {
+    get {return _clientType ?? String()}
+    set {_clientType = newValue}
+  }
+  /// Returns true if `clientType` has been explicitly set.
+  var hasClientType: Bool {return self._clientType != nil}
+  /// Clears the value of `clientType`. Subsequent reads from it will return its default value.
+  mutating func clearClientType() {self._clientType = nil}
+
+  /// The name of the artifact is expected in the form of a "Relative Path" that is made up of a
+  /// sequence of directories and the final file element.
+  /// Examples of "Relative Path"s: "jars/test.jar", "classes/xyz.class", "abc.xyz", "a/b/X.jar".
+  /// The server is expected to maintain the hierarchy of files as defined by their name. (i.e.
+  /// The relative path of the file on the server's filesystem will be the same as the name of
+  /// the provided artifact)
+  var names: [String] = []
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _clientObservedServerSideSessionID: String? = nil
+  fileprivate var _userContext: Spark_Connect_UserContext? = nil
+  fileprivate var _clientType: String? = nil
+}
+
+/// Response to checking artifact statuses.
+/// Next ID: 4
+struct Spark_Connect_ArtifactStatusesResponse: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// Session id in which the ArtifactStatus was running.
+  var sessionID: String = String()
+
+  /// Server-side generated idempotency key that the client can use to assert that the server side
+  /// session has not changed.
+  var serverSideSessionID: String = String()
+
+  /// A map of artifact names to their statuses.
+  var statuses: Dictionary<String, Spark_Connect_ArtifactStatusesResponse.ArtifactStatus> = [:]
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  struct ArtifactStatus: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// Whether the particular artifact exists at the server.
+    var exists: Bool = false
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  init() {}
+}
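+
+// Example (illustrative sketch): inspecting the `statuses` map of an
+// `ArtifactStatusesResponse` to find which artifacts still need to be (re)uploaded.
+fileprivate func exampleMissingArtifacts(
+  in response: Spark_Connect_ArtifactStatusesResponse
+) -> [String] {
+  // `statuses` maps artifact names (relative paths) to their server-side status.
+  return response.statuses.filter { !$0.value.exists }.map { $0.key }.sorted()
+}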
+
+struct Spark_Connect_InterruptRequest: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required)
+  ///
+  /// The session_id specifies a spark session for a user id (which is specified
+  /// by user_context.user_id). The session_id is set by the client to be able to
+  /// collate streaming responses from different queries within the dedicated session.
+  /// The id should be a UUID string of the format `00112233-4455-6677-8899-aabbccddeeff`
+  var sessionID: String = String()
+
+  /// (Optional)
+  ///
+  /// Server-side generated idempotency key from the previous responses (if any). Server
+  /// can use this to validate that the server side session has not changed.
+  var clientObservedServerSideSessionID: String {
+    get {return _clientObservedServerSideSessionID ?? String()}
+    set {_clientObservedServerSideSessionID = newValue}
+  }
+  /// Returns true if `clientObservedServerSideSessionID` has been explicitly set.
+  var hasClientObservedServerSideSessionID: Bool {return self._clientObservedServerSideSessionID != nil}
+  /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value.
+  mutating func clearClientObservedServerSideSessionID() {self._clientObservedServerSideSessionID = nil}
+
+  /// (Required) User context
+  var userContext: Spark_Connect_UserContext {
+    get {return _userContext ?? Spark_Connect_UserContext()}
+    set {_userContext = newValue}
+  }
+  /// Returns true if `userContext` has been explicitly set.
+  var hasUserContext: Bool {return self._userContext != nil}
+  /// Clears the value of `userContext`. Subsequent reads from it will return its default value.
+  mutating func clearUserContext() {self._userContext = nil}
+
+  /// Provides optional information about the client sending the request. This field
+  /// can be used for language or version specific information and is only intended for
+  /// logging purposes and will not be interpreted by the server.
+  var clientType: String {
+    get {return _clientType ?? String()}
+    set {_clientType = newValue}
+  }
+  /// Returns true if `clientType` has been explicitly set.
+  var hasClientType: Bool {return self._clientType != nil}
+  /// Clears the value of `clientType`. Subsequent reads from it will return its default value.
+  mutating func clearClientType() {self._clientType = nil}
+
+  /// (Required) The type of interrupt to execute.
+  var interruptType: Spark_Connect_InterruptRequest.InterruptType = .unspecified
+
+  var interrupt: Spark_Connect_InterruptRequest.OneOf_Interrupt? = nil
+
+  /// if interrupt_type == INTERRUPT_TYPE_TAG, interrupt operation with this tag.
+  var operationTag: String {
+    get {
+      if case .operationTag(let v)? = interrupt {return v}
+      return String()
+    }
+    set {interrupt = .operationTag(newValue)}
+  }
+
+  /// if interrupt_type == INTERRUPT_TYPE_OPERATION_ID, interrupt operation with this operation_id.
+  var operationID: String {
+    get {
+      if case .operationID(let v)? = interrupt {return v}
+      return String()
+    }
+    set {interrupt = .operationID(newValue)}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OneOf_Interrupt: Equatable, Sendable {
+    /// if interrupt_type == INTERRUPT_TYPE_TAG, interrupt operation with this tag.
+    case operationTag(String)
+    /// if interrupt_type == INTERRUPT_TYPE_OPERATION_ID, interrupt operation with this operation_id.
+    case operationID(String)
+
+  }
+
+  enum InterruptType: SwiftProtobuf.Enum, Swift.CaseIterable {
+    typealias RawValue = Int
+    case unspecified // = 0
+
+    /// Interrupt all running executions within the session with the provided session_id.
+    case all // = 1
+
+    /// Interrupt all running executions within the session with the provided operation_tag.
+    case tag // = 2
+
+    /// Interrupt the running execution within the session with the provided operation_id.
+    case operationID // = 3
+    case UNRECOGNIZED(Int)
+
+    init() {
+      self = .unspecified
+    }
+
+    init?(rawValue: Int) {
+      switch rawValue {
+      case 0: self = .unspecified
+      case 1: self = .all
+      case 2: self = .tag
+      case 3: self = .operationID
+      default: self = .UNRECOGNIZED(rawValue)
+      }
+    }
+
+    var rawValue: Int {
+      switch self {
+      case .unspecified: return 0
+      case .all: return 1
+      case .tag: return 2
+      case .operationID: return 3
+      case .UNRECOGNIZED(let i): return i
+      }
+    }
+
+    // The compiler won't synthesize support with the UNRECOGNIZED case.
+    static let allCases: [Spark_Connect_InterruptRequest.InterruptType] = [
+      .unspecified,
+      .all,
+      .tag,
+      .operationID,
+    ]
+
+  }
+
+  init() {}
+
+  fileprivate var _clientObservedServerSideSessionID: String? = nil
+  fileprivate var _userContext: Spark_Connect_UserContext? = nil
+  fileprivate var _clientType: String? = nil
+}
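+
+// Example (illustrative sketch): building an `InterruptRequest` that cancels every running
+// execution carrying a given operation tag. The session id and tag values are placeholders,
+// and the user context is left at its default for brevity.
+fileprivate func exampleInterruptByTag(sessionID: String, tag: String) -> Spark_Connect_InterruptRequest {
+  var request = Spark_Connect_InterruptRequest()
+  request.sessionID = sessionID
+  request.interruptType = .tag
+  // Setting `operationTag` populates the `interrupt` oneof.
+  request.operationTag = tag
+  return request
+}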
+
+/// Next ID: 4
+struct Spark_Connect_InterruptResponse: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// Session id in which the interrupt was running.
+  var sessionID: String = String()
+
+  /// Server-side generated idempotency key that the client can use to assert that the server side
+  /// session has not changed.
+  var serverSideSessionID: String = String()
+
+  /// Operation ids of the executions which were interrupted.
+  var interruptedIds: [String] = []
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+struct Spark_Connect_ReattachOptions: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// If true, the request can be reattached to using ReattachExecute.
+  /// ReattachExecute can be used either if the stream broke with a GRPC network error,
+  /// or if the server closed the stream without sending a response with StreamStatus.complete=true.
+  /// The server will keep a buffer of responses in case a response is lost, and
+  /// ReattachExecute needs to back-track.
+  ///
+  /// If false, the execution response stream will not be reattachable, and all responses are
+  /// immediately released by the server after being sent.
+  var reattachable: Bool = false
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+struct Spark_Connect_ReattachExecuteRequest: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required)
+  ///
+  /// The session_id of the request to reattach to.
+  /// This must be an id of an existing session.
+  var sessionID: String = String()
+
+  /// (Optional)
+  ///
+  /// Server-side generated idempotency key from the previous responses (if any). Server
+  /// can use this to validate that the server side session has not changed.
+  var clientObservedServerSideSessionID: String {
+    get {return _clientObservedServerSideSessionID ?? String()}
+    set {_clientObservedServerSideSessionID = newValue}
+  }
+  /// Returns true if `clientObservedServerSideSessionID` has been explicitly set.
+  var hasClientObservedServerSideSessionID: Bool {return self._clientObservedServerSideSessionID != nil}
+  /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value.
+  mutating func clearClientObservedServerSideSessionID() {self._clientObservedServerSideSessionID = nil}
+
+  /// (Required) User context
+  ///
+  /// user_context.user_id and session_id both identify a unique remote spark session on the
+  /// server side.
+  var userContext: Spark_Connect_UserContext {
+    get {return _userContext ?? Spark_Connect_UserContext()}
+    set {_userContext = newValue}
+  }
+  /// Returns true if `userContext` has been explicitly set.
+  var hasUserContext: Bool {return self._userContext != nil}
+  /// Clears the value of `userContext`. Subsequent reads from it will return its default value.
+  mutating func clearUserContext() {self._userContext = nil}
+
+  /// (Required)
+  /// Provide an id of the request to reattach to.
+  /// This must be an id of an existing operation.
+  var operationID: String = String()
+
+  /// Provides optional information about the client sending the request. This field
+  /// can be used for language or version specific information and is only intended for
+  /// logging purposes and will not be interpreted by the server.
+  var clientType: String {
+    get {return _clientType ?? String()}
+    set {_clientType = newValue}
+  }
+  /// Returns true if `clientType` has been explicitly set.
+  var hasClientType: Bool {return self._clientType != nil}
+  /// Clears the value of `clientType`. Subsequent reads from it will return its default value.
+  mutating func clearClientType() {self._clientType = nil}
+
+  /// (Optional)
+  /// Last already processed response id from the response stream.
+  /// After reattach, server will resume the response stream after that response.
+  /// If not specified, server will restart the stream from the start.
+  ///
+  /// Note: server controls the amount of responses that it buffers and it may drop responses
+  /// that are far behind the latest returned response, so this can't be used to arbitrarily
+  /// scroll back the cursor. If the response is no longer available, this will result in an error.
+  var lastResponseID: String {
+    get {return _lastResponseID ?? String()}
+    set {_lastResponseID = newValue}
+  }
+  /// Returns true if `lastResponseID` has been explicitly set.
+  var hasLastResponseID: Bool {return self._lastResponseID != nil}
+  /// Clears the value of `lastResponseID`. Subsequent reads from it will return its default value.
+  mutating func clearLastResponseID() {self._lastResponseID = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _clientObservedServerSideSessionID: String? = nil
+  fileprivate var _userContext: Spark_Connect_UserContext? = nil
+  fileprivate var _clientType: String? = nil
+  fileprivate var _lastResponseID: String? = nil
+}
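+
+// Example (illustrative sketch): reattaching to a reattachable execution after a broken
+// stream, resuming after the last response id the client managed to process.
+fileprivate func exampleReattach(
+  sessionID: String, operationID: String, lastSeenResponseID: String?
+) -> Spark_Connect_ReattachExecuteRequest {
+  var request = Spark_Connect_ReattachExecuteRequest()
+  request.sessionID = sessionID
+  request.operationID = operationID
+  if let lastSeenResponseID {
+    // If left unset, the server restarts the stream from the beginning instead.
+    request.lastResponseID = lastSeenResponseID
+  }
+  return request
+}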
+
+struct Spark_Connect_ReleaseExecuteRequest: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required)
+  ///
+  /// The session_id of the request to reattach to.
+  /// This must be an id of an existing session.
+  var sessionID: String = String()
+
+  /// (Optional)
+  ///
+  /// Server-side generated idempotency key from the previous responses (if any). Server
+  /// can use this to validate that the server side session has not changed.
+  var clientObservedServerSideSessionID: String {
+    get {return _clientObservedServerSideSessionID ?? String()}
+    set {_clientObservedServerSideSessionID = newValue}
+  }
+  /// Returns true if `clientObservedServerSideSessionID` has been explicitly set.
+  var hasClientObservedServerSideSessionID: Bool {return self._clientObservedServerSideSessionID != nil}
+  /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value.
+  mutating func clearClientObservedServerSideSessionID() {self._clientObservedServerSideSessionID = nil}
+
+  /// (Required) User context
+  ///
+  /// user_context.user_id and session_id both identify a unique remote spark session on the
+  /// server side.
+  var userContext: Spark_Connect_UserContext {
+    get {return _userContext ?? Spark_Connect_UserContext()}
+    set {_userContext = newValue}
+  }
+  /// Returns true if `userContext` has been explicitly set.
+  var hasUserContext: Bool {return self._userContext != nil}
+  /// Clears the value of `userContext`. Subsequent reads from it will return its default value.
+  mutating func clearUserContext() {self._userContext = nil}
+
+  /// (Required)
+  /// Provide an id of the request to reattach to.
+  /// This must be an id of an existing operation.
+  var operationID: String = String()
+
+  /// Provides optional information about the client sending the request. This field
+  /// can be used for language or version specific information and is only intended for
+  /// logging purposes and will not be interpreted by the server.
+  var clientType: String {
+    get {return _clientType ?? String()}
+    set {_clientType = newValue}
+  }
+  /// Returns true if `clientType` has been explicitly set.
+  var hasClientType: Bool {return self._clientType != nil}
+  /// Clears the value of `clientType`. Subsequent reads from it will return its default value.
+  mutating func clearClientType() {self._clientType = nil}
+
+  var release: Spark_Connect_ReleaseExecuteRequest.OneOf_Release? = nil
+
+  var releaseAll: Spark_Connect_ReleaseExecuteRequest.ReleaseAll {
+    get {
+      if case .releaseAll(let v)? = release {return v}
+      return Spark_Connect_ReleaseExecuteRequest.ReleaseAll()
+    }
+    set {release = .releaseAll(newValue)}
+  }
+
+  var releaseUntil: Spark_Connect_ReleaseExecuteRequest.ReleaseUntil {
+    get {
+      if case .releaseUntil(let v)? = release {return v}
+      return Spark_Connect_ReleaseExecuteRequest.ReleaseUntil()
+    }
+    set {release = .releaseUntil(newValue)}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OneOf_Release: Equatable, Sendable {
+    case releaseAll(Spark_Connect_ReleaseExecuteRequest.ReleaseAll)
+    case releaseUntil(Spark_Connect_ReleaseExecuteRequest.ReleaseUntil)
+
+  }
+
+  /// Release and close the operation completely.
+  /// This will also interrupt the query if it is still executing, and wait for it to be torn down.
+  struct ReleaseAll: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  /// Release all responses from the operation response stream up to and including
+  /// the response with the given response_id.
+  /// While the server determines on its own how large a buffer of responses to keep, explicit
+  /// release calls from the client help reduce resource consumption.
+  /// No-op if the response_id is not found in the cached responses.
+  struct ReleaseUntil: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    var responseID: String = String()
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  init() {}
+
+  fileprivate var _clientObservedServerSideSessionID: String? = nil
+  fileprivate var _userContext: Spark_Connect_UserContext? = nil
+  fileprivate var _clientType: String? = nil
+}
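+
+// Example (illustrative sketch): telling the server it may drop buffered responses up to and
+// including `responseID`, via the `release` oneof. Use `releaseAll` instead to close the
+// operation completely.
+fileprivate func exampleReleaseUntil(
+  sessionID: String, operationID: String, responseID: String
+) -> Spark_Connect_ReleaseExecuteRequest {
+  var request = Spark_Connect_ReleaseExecuteRequest()
+  request.sessionID = sessionID
+  request.operationID = operationID
+  var until = Spark_Connect_ReleaseExecuteRequest.ReleaseUntil()
+  until.responseID = responseID
+  request.releaseUntil = until
+  return request
+}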
+
+/// Next ID: 4
+struct Spark_Connect_ReleaseExecuteResponse: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// Session id in which the release was running.
+  var sessionID: String = String()
+
+  /// Server-side generated idempotency key that the client can use to assert that the server side
+  /// session has not changed.
+  var serverSideSessionID: String = String()
+
+  /// Operation id of the operation on which the release executed.
+  /// If the operation couldn't be found (because e.g. it was concurrently released), will be unset.
+  /// Otherwise, it will be equal to the operation_id from the request.
+  var operationID: String {
+    get {return _operationID ?? String()}
+    set {_operationID = newValue}
+  }
+  /// Returns true if `operationID` has been explicitly set.
+  var hasOperationID: Bool {return self._operationID != nil}
+  /// Clears the value of `operationID`. Subsequent reads from it will return its default value.
+  mutating func clearOperationID() {self._operationID = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _operationID: String? = nil
+}
+
+struct Spark_Connect_ReleaseSessionRequest: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required)
+  ///
+  /// The session_id of the session to release.
+  /// This must be an id of an existing session.
+  var sessionID: String = String()
+
+  /// (Required) User context
+  ///
+  /// user_context.user_id and session_id both identify a unique remote spark session on the
+  /// server side.
+  var userContext: Spark_Connect_UserContext {
+    get {return _userContext ?? Spark_Connect_UserContext()}
+    set {_userContext = newValue}
+  }
+  /// Returns true if `userContext` has been explicitly set.
+  var hasUserContext: Bool {return self._userContext != nil}
+  /// Clears the value of `userContext`. Subsequent reads from it will return its default value.
+  mutating func clearUserContext() {self._userContext = nil}
+
+  /// Provides optional information about the client sending the request. This field
+  /// can be used for language or version specific information and is only intended for
+  /// logging purposes and will not be interpreted by the server.
+  var clientType: String {
+    get {return _clientType ?? String()}
+    set {_clientType = newValue}
+  }
+  /// Returns true if `clientType` has been explicitly set.
+  var hasClientType: Bool {return self._clientType != nil}
+  /// Clears the value of `clientType`. Subsequent reads from it will return its default value.
+  mutating func clearClientType() {self._clientType = nil}
+
+  /// Signals the server to allow the client to reconnect to the session after it is released.
+  ///
+  /// By default, the server tombstones the session upon release, preventing reconnections and
+  /// fully cleaning the session state.
+  ///
+  /// If this flag is set to true, the server may permit the client to reconnect to the session
+  /// post-release, even if the session state has been cleaned. This can result in missing state,
+  /// such as Temporary Views, Temporary UDFs, or the Current Catalog, in the reconnected session.
+  ///
+  /// Use this option sparingly and only when the client fully understands the implications of
+  /// reconnecting to a released session. The client must ensure that any queries executed do not
+  /// rely on the session state prior to its release.
+  var allowReconnect: Bool = false
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _userContext: Spark_Connect_UserContext? = nil
+  fileprivate var _clientType: String? = nil
+}
+
+/// Next ID: 3
+struct Spark_Connect_ReleaseSessionResponse: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// Session id of the session on which the release executed.
+  var sessionID: String = String()
+
+  /// Server-side generated idempotency key that the client can use to assert that the server side
+  /// session has not changed.
+ var serverSideSessionID: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_FetchErrorDetailsRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + /// The session_id specifies a Spark session for a user identified by user_context.user_id. + /// The id should be a UUID string of the format `00112233-4455-6677-8899-aabbccddeeff`. + var sessionID: String = String() + + /// (Optional) + /// + /// Server-side generated idempotency key from the previous responses (if any). Server + /// can use this to validate that the server side session has not changed. + var clientObservedServerSideSessionID: String { + get {return _clientObservedServerSideSessionID ?? String()} + set {_clientObservedServerSideSessionID = newValue} + } + /// Returns true if `clientObservedServerSideSessionID` has been explicitly set. + var hasClientObservedServerSideSessionID: Bool {return self._clientObservedServerSideSessionID != nil} + /// Clears the value of `clientObservedServerSideSessionID`. Subsequent reads from it will return its default value. + mutating func clearClientObservedServerSideSessionID() {self._clientObservedServerSideSessionID = nil} + + /// User context + var userContext: Spark_Connect_UserContext { + get {return _userContext ?? Spark_Connect_UserContext()} + set {_userContext = newValue} + } + /// Returns true if `userContext` has been explicitly set. + var hasUserContext: Bool {return self._userContext != nil} + /// Clears the value of `userContext`. Subsequent reads from it will return its default value. + mutating func clearUserContext() {self._userContext = nil} + + /// (Required) + /// The id of the error. + var errorID: String = String() + + /// Provides optional information about the client sending the request. This field + /// can be used for language or version specific information and is only intended for + /// logging purposes and will not be interpreted by the server. + var clientType: String { + get {return _clientType ?? String()} + set {_clientType = newValue} + } + /// Returns true if `clientType` has been explicitly set. + var hasClientType: Bool {return self._clientType != nil} + /// Clears the value of `clientType`. Subsequent reads from it will return its default value. + mutating func clearClientType() {self._clientType = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _clientObservedServerSideSessionID: String? = nil + fileprivate var _userContext: Spark_Connect_UserContext? = nil + fileprivate var _clientType: String? = nil +} + +/// Next ID: 5 +struct Spark_Connect_FetchErrorDetailsResponse: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// Server-side generated idempotency key that the client can use to assert that the server side + /// session has not changed. + var serverSideSessionID: String = String() + + var sessionID: String = String() + + /// The index of the root error in errors. The field will not be set if the error is not found. + var rootErrorIdx: Int32 { + get {return _rootErrorIdx ?? 0} + set {_rootErrorIdx = newValue} + } + /// Returns true if `rootErrorIdx` has been explicitly set. 
+ var hasRootErrorIdx: Bool {return self._rootErrorIdx != nil} + /// Clears the value of `rootErrorIdx`. Subsequent reads from it will return its default value. + mutating func clearRootErrorIdx() {self._rootErrorIdx = nil} + + /// A list of errors. + var errors: [Spark_Connect_FetchErrorDetailsResponse.Error] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + struct StackTraceElement: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// The fully qualified name of the class containing the execution point. + var declaringClass: String = String() + + /// The name of the method containing the execution point. + var methodName: String = String() + + /// The name of the file containing the execution point. + var fileName: String { + get {return _fileName ?? String()} + set {_fileName = newValue} + } + /// Returns true if `fileName` has been explicitly set. + var hasFileName: Bool {return self._fileName != nil} + /// Clears the value of `fileName`. Subsequent reads from it will return its default value. + mutating func clearFileName() {self._fileName = nil} + + /// The line number of the source line containing the execution point. + var lineNumber: Int32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _fileName: String? = nil + } + + /// QueryContext defines the schema for the query context of a SparkThrowable. + /// It helps users understand where the error occurs while executing queries. + struct QueryContext: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var contextType: Spark_Connect_FetchErrorDetailsResponse.QueryContext.ContextType = .sql + + /// The object type of the query which throws the exception. + /// If the exception is directly from the main query, it should be an empty string. + /// Otherwise, it should be the exact object type in upper case. For example, a "VIEW". + var objectType: String = String() + + /// The object name of the query which throws the exception. + /// If the exception is directly from the main query, it should be an empty string. + /// Otherwise, it should be the object name. For example, a view name "V1". + var objectName: String = String() + + /// The starting index in the query text which throws the exception. The index starts from 0. + var startIndex: Int32 = 0 + + /// The stopping index in the query which throws the exception. The index starts from 0. + var stopIndex: Int32 = 0 + + /// The corresponding fragment of the query which throws the exception. + var fragment: String = String() + + /// The user code (call site of the API) that caused throwing the exception. + var callSite: String = String() + + /// Summary of the exception cause. + var summary: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// The type of this query context. 
+    enum ContextType: SwiftProtobuf.Enum, Swift.CaseIterable {
+      typealias RawValue = Int
+      case sql // = 0
+      case dataframe // = 1
+      case UNRECOGNIZED(Int)
+
+      init() {
+        self = .sql
+      }
+
+      init?(rawValue: Int) {
+        switch rawValue {
+        case 0: self = .sql
+        case 1: self = .dataframe
+        default: self = .UNRECOGNIZED(rawValue)
+        }
+      }
+
+      var rawValue: Int {
+        switch self {
+        case .sql: return 0
+        case .dataframe: return 1
+        case .UNRECOGNIZED(let i): return i
+        }
+      }
+
+      // The compiler won't synthesize support with the UNRECOGNIZED case.
+      static let allCases: [Spark_Connect_FetchErrorDetailsResponse.QueryContext.ContextType] = [
+        .sql,
+        .dataframe,
+      ]
+
+    }
+
+    init() {}
+  }
+
+  /// SparkThrowable defines the schema for SparkThrowable exceptions.
+  struct SparkThrowable: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// Succinct, human-readable, unique, and consistent representation of the error category.
+    var errorClass: String {
+      get {return _errorClass ?? String()}
+      set {_errorClass = newValue}
+    }
+    /// Returns true if `errorClass` has been explicitly set.
+    var hasErrorClass: Bool {return self._errorClass != nil}
+    /// Clears the value of `errorClass`. Subsequent reads from it will return its default value.
+    mutating func clearErrorClass() {self._errorClass = nil}
+
+    /// The message parameters for the error framework.
+    var messageParameters: Dictionary<String, String> = [:]
+
+    /// The query context of a SparkThrowable.
+    var queryContexts: [Spark_Connect_FetchErrorDetailsResponse.QueryContext] = []
+
+    /// Portable error identifier across SQL engines.
+    /// If null, the error class or SQLSTATE is not set.
+    var sqlState: String {
+      get {return _sqlState ?? String()}
+      set {_sqlState = newValue}
+    }
+    /// Returns true if `sqlState` has been explicitly set.
+    var hasSqlState: Bool {return self._sqlState != nil}
+    /// Clears the value of `sqlState`. Subsequent reads from it will return its default value.
+    mutating func clearSqlState() {self._sqlState = nil}
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _errorClass: String? = nil
+    fileprivate var _sqlState: String? = nil
+  }
+
+  /// Error defines the schema for representing an exception.
+  struct Error: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// The fully qualified names of the exception class and its parent classes.
+    var errorTypeHierarchy: [String] = []
+
+    /// The detailed message of the exception.
+    var message: String = String()
+
+    /// The stackTrace of the exception. It will be set
+    /// if the SQLConf spark.sql.connect.serverStacktrace.enabled is true.
+    var stackTrace: [Spark_Connect_FetchErrorDetailsResponse.StackTraceElement] = []
+
+    /// The index of the cause error in errors.
+    var causeIdx: Int32 {
+      get {return _causeIdx ?? 0}
+      set {_causeIdx = newValue}
+    }
+    /// Returns true if `causeIdx` has been explicitly set.
+    var hasCauseIdx: Bool {return self._causeIdx != nil}
+    /// Clears the value of `causeIdx`. Subsequent reads from it will return its default value.
+    mutating func clearCauseIdx() {self._causeIdx = nil}
+
+    /// The structured data of a SparkThrowable exception.
+    var sparkThrowable: Spark_Connect_FetchErrorDetailsResponse.SparkThrowable {
+      get {return _sparkThrowable ?? Spark_Connect_FetchErrorDetailsResponse.SparkThrowable()}
+      set {_sparkThrowable = newValue}
+    }
+    /// Returns true if `sparkThrowable` has been explicitly set.
+    var hasSparkThrowable: Bool {return self._sparkThrowable != nil}
+    /// Clears the value of `sparkThrowable`. Subsequent reads from it will return its default value.
+    mutating func clearSparkThrowable() {self._sparkThrowable = nil}
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _causeIdx: Int32? = nil
+    fileprivate var _sparkThrowable: Spark_Connect_FetchErrorDetailsResponse.SparkThrowable? = nil
+  }
+
+  init() {}
+
+  fileprivate var _rootErrorIdx: Int32? = nil
+}
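+
+// Example (illustrative sketch): flattening the error chain of a `FetchErrorDetailsResponse`.
+// `errors` is a flat list; `rootErrorIdx` points at the root error and each error's `causeIdx`
+// points at its cause, so the chain is walked by following indices.
+fileprivate func exampleErrorChain(
+  in response: Spark_Connect_FetchErrorDetailsResponse
+) -> [Spark_Connect_FetchErrorDetailsResponse.Error] {
+  guard response.hasRootErrorIdx else { return [] }
+  var chain: [Spark_Connect_FetchErrorDetailsResponse.Error] = []
+  var index: Int? = Int(response.rootErrorIdx)
+  // Bound the walk by the number of errors to stay safe against malformed cycles.
+  while let i = index, response.errors.indices.contains(i), chain.count < response.errors.count {
+    let error = response.errors[i]
+    chain.append(error)
+    index = error.hasCauseIdx ? Int(error.causeIdx) : nil
+  }
+  return chain
+}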
+
+struct Spark_Connect_CheckpointCommandResult: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The logical plan checkpointed.
+  var relation: Spark_Connect_CachedRemoteRelation {
+    get {return _relation ?? Spark_Connect_CachedRemoteRelation()}
+    set {_relation = newValue}
+  }
+  /// Returns true if `relation` has been explicitly set.
+  var hasRelation: Bool {return self._relation != nil}
+  /// Clears the value of `relation`. Subsequent reads from it will return its default value.
+  mutating func clearRelation() {self._relation = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _relation: Spark_Connect_CachedRemoteRelation? = nil
+}
+
+// MARK: - Code below here is support for the SwiftProtobuf runtime.
+
+fileprivate let _protobuf_package = "spark.connect"
+
+extension Spark_Connect_Plan: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".Plan"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "root"),
+    2: .same(proto: "command"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try {
+        var v: Spark_Connect_Relation?
+        var hadOneofValue = false
+        if let current = self.opType {
+          hadOneofValue = true
+          if case .root(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.opType = .root(v)
+        }
+      }()
+      case 2: try {
+        var v: Spark_Connect_Command?
+        var hadOneofValue = false
+        if let current = self.opType {
+          hadOneofValue = true
+          if case .command(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.opType = .command(v)
+        }
+      }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    switch self.opType {
+    case .root?: try {
+      guard case .root(let v)? = self.opType else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    }()
+    case .command?: try {
+      guard case .command(let v)? = self.opType else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    }()
+    case nil: break
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_Plan, rhs: Spark_Connect_Plan) -> Bool {
+    if lhs.opType != rhs.opType {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
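+
+// Example (illustrative sketch): the `decodeMessage`/`traverse` implementations above back
+// SwiftProtobuf's standard serialization entry points, e.g. a binary round trip.
+fileprivate func examplePlanRoundTrip(_ plan: Spark_Connect_Plan) throws -> Bool {
+  let bytes = try plan.serializedData()                        // walks fields via `traverse`
+  let decoded = try Spark_Connect_Plan(serializedData: bytes)  // rebuilds via `decodeMessage`
+  return decoded == plan                                       // uses the `==` defined above
+}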
+
+extension Spark_Connect_UserContext: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".UserContext"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "user_id"),
+    2: .standard(proto: "user_name"),
+    999: .same(proto: "extensions"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.userID) }()
+      case 2: try { try decoder.decodeSingularStringField(value: &self.userName) }()
+      case 999: try { try decoder.decodeRepeatedMessageField(value: &self.extensions) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.userID.isEmpty {
+      try visitor.visitSingularStringField(value: self.userID, fieldNumber: 1)
+    }
+    if !self.userName.isEmpty {
+      try visitor.visitSingularStringField(value: self.userName, fieldNumber: 2)
+    }
+    if !self.extensions.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.extensions, fieldNumber: 999)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_UserContext, rhs: Spark_Connect_UserContext) -> Bool {
+    if lhs.userID != rhs.userID {return false}
+    if lhs.userName != rhs.userName {return false}
+    if lhs.extensions != rhs.extensions {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_AnalyzePlanRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".AnalyzePlanRequest"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "session_id"),
+    17: .standard(proto: "client_observed_server_side_session_id"),
+    2: .standard(proto: "user_context"),
+    3: .standard(proto: "client_type"),
+    4: .same(proto: "schema"),
+    5: .same(proto: "explain"),
+    6: .standard(proto: "tree_string"),
+    7: .standard(proto: "is_local"),
+    8: .standard(proto: "is_streaming"),
+    9: .standard(proto: "input_files"),
+    10: .standard(proto: "spark_version"),
+    11: .standard(proto: "ddl_parse"),
+    12: .standard(proto: "same_semantics"),
+    13: .standard(proto: "semantic_hash"),
+    14: .same(proto: "persist"),
+    15: .same(proto: "unpersist"),
+    16: .standard(proto: "get_storage_level"),
+    18: .standard(proto: "json_to_ddl"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _sessionID: String = String()
+    var _clientObservedServerSideSessionID: String? = nil
+    var _userContext: Spark_Connect_UserContext? = nil
+    var _clientType: String? = nil
+    var _analyze: Spark_Connect_AnalyzePlanRequest.OneOf_Analyze?
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _sessionID = source._sessionID
+      _clientObservedServerSideSessionID = source._clientObservedServerSideSessionID
+      _userContext = source._userContext
+      _clientType = source._clientType
+      _analyze = source._analyze
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularStringField(value: &_storage._sessionID) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._userContext) }()
+        case 3: try { try decoder.decodeSingularStringField(value: &_storage._clientType) }()
+        case 4: try {
+          var v: Spark_Connect_AnalyzePlanRequest.Schema?
+          var hadOneofValue = false
+          if let current = _storage._analyze {
+            hadOneofValue = true
+            if case .schema(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._analyze = .schema(v)
+          }
+        }()
+        case 5: try {
+          var v: Spark_Connect_AnalyzePlanRequest.Explain?
+          var hadOneofValue = false
+          if let current = _storage._analyze {
+            hadOneofValue = true
+            if case .explain(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._analyze = .explain(v)
+          }
+        }()
+        case 6: try {
+          var v: Spark_Connect_AnalyzePlanRequest.TreeString?
+          var hadOneofValue = false
+          if let current = _storage._analyze {
+            hadOneofValue = true
+            if case .treeString(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._analyze = .treeString(v)
+          }
+        }()
+        case 7: try {
+          var v: Spark_Connect_AnalyzePlanRequest.IsLocal?
+          var hadOneofValue = false
+          if let current = _storage._analyze {
+            hadOneofValue = true
+            if case .isLocal(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._analyze = .isLocal(v)
+          }
+        }()
+        case 8: try {
+          var v: Spark_Connect_AnalyzePlanRequest.IsStreaming?
+ var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .isStreaming(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .isStreaming(v) + } + }() + case 9: try { + var v: Spark_Connect_AnalyzePlanRequest.InputFiles? + var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .inputFiles(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .inputFiles(v) + } + }() + case 10: try { + var v: Spark_Connect_AnalyzePlanRequest.SparkVersion? + var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .sparkVersion(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .sparkVersion(v) + } + }() + case 11: try { + var v: Spark_Connect_AnalyzePlanRequest.DDLParse? + var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .ddlParse(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .ddlParse(v) + } + }() + case 12: try { + var v: Spark_Connect_AnalyzePlanRequest.SameSemantics? + var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .sameSemantics(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .sameSemantics(v) + } + }() + case 13: try { + var v: Spark_Connect_AnalyzePlanRequest.SemanticHash? + var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .semanticHash(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .semanticHash(v) + } + }() + case 14: try { + var v: Spark_Connect_AnalyzePlanRequest.Persist? + var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .persist(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .persist(v) + } + }() + case 15: try { + var v: Spark_Connect_AnalyzePlanRequest.Unpersist? + var hadOneofValue = false + if let current = _storage._analyze { + hadOneofValue = true + if case .unpersist(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._analyze = .unpersist(v) + } + }() + case 16: try { + var v: Spark_Connect_AnalyzePlanRequest.GetStorageLevel? 
+          var hadOneofValue = false
+          if let current = _storage._analyze {
+            hadOneofValue = true
+            if case .getStorageLevel(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._analyze = .getStorageLevel(v)
+          }
+        }()
+        case 17: try { try decoder.decodeSingularStringField(value: &_storage._clientObservedServerSideSessionID) }()
+        case 18: try {
+          var v: Spark_Connect_AnalyzePlanRequest.JsonToDDL?
+          var hadOneofValue = false
+          if let current = _storage._analyze {
+            hadOneofValue = true
+            if case .jsonToDdl(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._analyze = .jsonToDdl(v)
+          }
+        }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      if !_storage._sessionID.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._sessionID, fieldNumber: 1)
+      }
+      try { if let v = _storage._userContext {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+      } }()
+      try { if let v = _storage._clientType {
+        try visitor.visitSingularStringField(value: v, fieldNumber: 3)
+      } }()
+      switch _storage._analyze {
+      case .schema?: try {
+        guard case .schema(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 4)
+      }()
+      case .explain?: try {
+        guard case .explain(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 5)
+      }()
+      case .treeString?: try {
+        guard case .treeString(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 6)
+      }()
+      case .isLocal?: try {
+        guard case .isLocal(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 7)
+      }()
+      case .isStreaming?: try {
+        guard case .isStreaming(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 8)
+      }()
+      case .inputFiles?: try {
+        guard case .inputFiles(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 9)
+      }()
+      case .sparkVersion?: try {
+        guard case .sparkVersion(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 10)
+      }()
+      case .ddlParse?: try {
+        guard case .ddlParse(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 11)
+      }()
+      case .sameSemantics?: try {
+        guard case .sameSemantics(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 12)
+      }()
+      case .semanticHash?: try {
+        guard case .semanticHash(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 13)
+      }()
+      case .persist?: try {
+        guard case .persist(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 14)
+      }()
+      case .unpersist?: try {
+        guard case .unpersist(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 15)
+      }()
+      case .getStorageLevel?: try {
+        guard case .getStorageLevel(let v)? = _storage._analyze else { preconditionFailure() }
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 16)
+      }()
+      default: break
+      }
+      try { if let v = _storage._clientObservedServerSideSessionID {
+        try visitor.visitSingularStringField(value: v, fieldNumber: 17)
+      } }()
+      try { if case .jsonToDdl(let v)? = _storage._analyze {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 18)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_AnalyzePlanRequest, rhs: Spark_Connect_AnalyzePlanRequest) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._sessionID != rhs_storage._sessionID {return false}
+        if _storage._clientObservedServerSideSessionID != rhs_storage._clientObservedServerSideSessionID {return false}
+        if _storage._userContext != rhs_storage._userContext {return false}
+        if _storage._clientType != rhs_storage._clientType {return false}
+        if _storage._analyze != rhs_storage._analyze {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
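+
+// Example (illustrative sketch): `Spark_Connect_AnalyzePlanRequest` keeps its fields in the
+// copy-on-write `_StorageClass` above, so copies are cheap and value semantics are preserved:
+// mutating a copy first duplicates the storage via `_uniqueStorage()`.
+fileprivate func exampleValueSemantics() {
+  var a = Spark_Connect_AnalyzePlanRequest()
+  a.sessionID = "00112233-4455-6677-8899-aabbccddeeff"
+  var b = a                        // shares the same storage reference for now
+  b.sessionID = "another-session"  // triggers a copy of the storage before mutating
+  assert(a.sessionID != b.sessionID)
+}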
+
+extension Spark_Connect_AnalyzePlanRequest.Schema: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".Schema"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "plan"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._plan {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_AnalyzePlanRequest.Schema, rhs: Spark_Connect_AnalyzePlanRequest.Schema) -> Bool {
+    if lhs._plan != rhs._plan {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_AnalyzePlanRequest.Explain: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".Explain"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "plan"),
+    2: .standard(proto: "explain_mode"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }()
+      case 2: try { try decoder.decodeSingularEnumField(value: &self.explainMode) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._plan {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    if self.explainMode != .unspecified {
+      try visitor.visitSingularEnumField(value: self.explainMode, fieldNumber: 2)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_AnalyzePlanRequest.Explain, rhs: Spark_Connect_AnalyzePlanRequest.Explain) -> Bool {
+    if lhs._plan != rhs._plan {return false}
+    if lhs.explainMode != rhs.explainMode {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_AnalyzePlanRequest.Explain.ExplainMode: SwiftProtobuf._ProtoNameProviding {
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    0: .same(proto: "EXPLAIN_MODE_UNSPECIFIED"),
+    1: .same(proto: "EXPLAIN_MODE_SIMPLE"),
+    2: .same(proto: "EXPLAIN_MODE_EXTENDED"),
+    3: .same(proto: "EXPLAIN_MODE_CODEGEN"),
+    4: .same(proto: "EXPLAIN_MODE_COST"),
+    5: .same(proto: "EXPLAIN_MODE_FORMATTED"),
+  ]
+}
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self._level) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._level { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.TreeString, rhs: Spark_Connect_AnalyzePlanRequest.TreeString) -> Bool { + if lhs._plan != rhs._plan {return false} + if lhs._level != rhs._level {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.IsLocal: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".IsLocal" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "plan"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.IsLocal, rhs: Spark_Connect_AnalyzePlanRequest.IsLocal) -> Bool { + if lhs._plan != rhs._plan {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.IsStreaming: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".IsStreaming" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "plan"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.IsStreaming, rhs: Spark_Connect_AnalyzePlanRequest.IsStreaming) -> Bool { + if lhs._plan != rhs._plan {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.InputFiles: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".InputFiles" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "plan"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.InputFiles, rhs: Spark_Connect_AnalyzePlanRequest.InputFiles) -> Bool { + if lhs._plan != rhs._plan {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.SparkVersion: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".SparkVersion" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.SparkVersion, rhs: Spark_Connect_AnalyzePlanRequest.SparkVersion) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.DDLParse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".DDLParse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "ddl_string"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.ddlString) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.ddlString.isEmpty { + try visitor.visitSingularStringField(value: self.ddlString, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.DDLParse, rhs: Spark_Connect_AnalyzePlanRequest.DDLParse) -> Bool { + if lhs.ddlString != rhs.ddlString {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.SameSemantics: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".SameSemantics" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "target_plan"), + 2: .standard(proto: "other_plan"), + ] + + fileprivate class _StorageClass { + var _targetPlan: Spark_Connect_Plan? = nil + var _otherPlan: Spark_Connect_Plan? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
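+ // Pairing this shared default with `_uniqueStorage()` below yields copy-on-write:
+ // every new message starts by referencing `defaultInstance`, and
+ // `isKnownUniquelyReferenced` forces a private copy only on the first mutation.
+ // Illustration (with `plan` standing in for any Spark_Connect_Plan value):
+ //   var a = Spark_Connect_AnalyzePlanRequest.SameSemantics()
+ //   let b = a             // `b` shares `a`'s storage reference
+ //   a.targetPlan = plan   // first mutation copies the storage; `b` is untouched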
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _targetPlan = source._targetPlan + _otherPlan = source._otherPlan + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._targetPlan) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._otherPlan) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._targetPlan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._otherPlan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.SameSemantics, rhs: Spark_Connect_AnalyzePlanRequest.SameSemantics) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._targetPlan != rhs_storage._targetPlan {return false} + if _storage._otherPlan != rhs_storage._otherPlan {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.SemanticHash: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".SemanticHash" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "plan"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._plan) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.SemanticHash, rhs: Spark_Connect_AnalyzePlanRequest.SemanticHash) -> Bool { + if lhs._plan != rhs._plan {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.Persist: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".Persist" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "relation"), + 2: .standard(proto: "storage_level"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._relation) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._storageLevel) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._relation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._storageLevel { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.Persist, rhs: Spark_Connect_AnalyzePlanRequest.Persist) -> Bool { + if lhs._relation != rhs._relation {return false} + if lhs._storageLevel != rhs._storageLevel {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.Unpersist: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".Unpersist" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "relation"), + 2: .same(proto: "blocking"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._relation) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self._blocking) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._relation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._blocking { + try visitor.visitSingularBoolField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.Unpersist, rhs: Spark_Connect_AnalyzePlanRequest.Unpersist) -> Bool { + if lhs._relation != rhs._relation {return false} + if lhs._blocking != rhs._blocking {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.GetStorageLevel: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".GetStorageLevel" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "relation"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._relation) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._relation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.GetStorageLevel, rhs: Spark_Connect_AnalyzePlanRequest.GetStorageLevel) -> Bool { + if lhs._relation != rhs._relation {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanRequest.JsonToDDL: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanRequest.protoMessageName + ".JsonToDDL" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "json_string"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.jsonString) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.jsonString.isEmpty { + try visitor.visitSingularStringField(value: self.jsonString, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanRequest.JsonToDDL, rhs: Spark_Connect_AnalyzePlanRequest.JsonToDDL) -> Bool { + if lhs.jsonString != rhs.jsonString {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".AnalyzePlanResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 15: .standard(proto: "server_side_session_id"), + 2: .same(proto: "schema"), + 3: .same(proto: "explain"), + 4: .standard(proto: "tree_string"), + 5: .standard(proto: "is_local"), + 6: .standard(proto: "is_streaming"), + 7: .standard(proto: "input_files"), + 8: .standard(proto: "spark_version"), + 9: .standard(proto: "ddl_parse"), + 10: .standard(proto: "same_semantics"), + 11: .standard(proto: "semantic_hash"), + 12: .same(proto: "persist"), + 13: .same(proto: "unpersist"), + 14: .standard(proto: "get_storage_level"), + 16: .standard(proto: "json_to_ddl"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { + var v: Spark_Connect_AnalyzePlanResponse.Schema? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .schema(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .schema(v) + } + }() + case 3: try { + var v: Spark_Connect_AnalyzePlanResponse.Explain? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .explain(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .explain(v) + } + }() + case 4: try { + var v: Spark_Connect_AnalyzePlanResponse.TreeString? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .treeString(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .treeString(v) + } + }() + case 5: try { + var v: Spark_Connect_AnalyzePlanResponse.IsLocal? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .isLocal(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .isLocal(v) + } + }() + case 6: try { + var v: Spark_Connect_AnalyzePlanResponse.IsStreaming? 
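+ // Standard SwiftProtobuf oneof decode: keep the current payload when the same
+ // case repeats so the newly decoded bytes merge into it, and let the decoder
+ // surface a duplicate via `handleConflictingOneOf()` where the format demands it.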
+ var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .isStreaming(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .isStreaming(v) + } + }() + case 7: try { + var v: Spark_Connect_AnalyzePlanResponse.InputFiles? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .inputFiles(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .inputFiles(v) + } + }() + case 8: try { + var v: Spark_Connect_AnalyzePlanResponse.SparkVersion? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .sparkVersion(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .sparkVersion(v) + } + }() + case 9: try { + var v: Spark_Connect_AnalyzePlanResponse.DDLParse? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .ddlParse(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .ddlParse(v) + } + }() + case 10: try { + var v: Spark_Connect_AnalyzePlanResponse.SameSemantics? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .sameSemantics(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .sameSemantics(v) + } + }() + case 11: try { + var v: Spark_Connect_AnalyzePlanResponse.SemanticHash? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .semanticHash(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .semanticHash(v) + } + }() + case 12: try { + var v: Spark_Connect_AnalyzePlanResponse.Persist? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .persist(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .persist(v) + } + }() + case 13: try { + var v: Spark_Connect_AnalyzePlanResponse.Unpersist? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .unpersist(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .unpersist(v) + } + }() + case 14: try { + var v: Spark_Connect_AnalyzePlanResponse.GetStorageLevel? + var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .getStorageLevel(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .getStorageLevel(v) + } + }() + case 15: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + case 16: try { + var v: Spark_Connect_AnalyzePlanResponse.JsonToDDL? 
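+ // `json_to_ddl` is field 16 and numerically follows the `server_side_session_id`
+ // string (15), so `traverse` below emits it after the oneof switch to keep
+ // serialization in ascending field-number order.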
+ var hadOneofValue = false + if let current = self.result { + hadOneofValue = true + if case .jsonToDdl(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.result = .jsonToDdl(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + switch self.result { + case .schema?: try { + guard case .schema(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .explain?: try { + guard case .explain(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .treeString?: try { + guard case .treeString(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .isLocal?: try { + guard case .isLocal(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .isStreaming?: try { + guard case .isStreaming(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .inputFiles?: try { + guard case .inputFiles(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case .sparkVersion?: try { + guard case .sparkVersion(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + }() + case .ddlParse?: try { + guard case .ddlParse(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case .sameSemantics?: try { + guard case .sameSemantics(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 10) + }() + case .semanticHash?: try { + guard case .semanticHash(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 11) + }() + case .persist?: try { + guard case .persist(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 12) + }() + case .unpersist?: try { + guard case .unpersist(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 13) + }() + case .getStorageLevel?: try { + guard case .getStorageLevel(let v)? = self.result else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + }() + default: break + } + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 15) + } + try { if case .jsonToDdl(let v)? 
= self.result { + try visitor.visitSingularMessageField(value: v, fieldNumber: 16) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse, rhs: Spark_Connect_AnalyzePlanResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.result != rhs.result {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.Schema: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".Schema" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "schema"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._schema) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._schema { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.Schema, rhs: Spark_Connect_AnalyzePlanResponse.Schema) -> Bool { + if lhs._schema != rhs._schema {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.Explain: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".Explain" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "explain_string"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.explainString) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.explainString.isEmpty { + try visitor.visitSingularStringField(value: self.explainString, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.Explain, rhs: Spark_Connect_AnalyzePlanResponse.Explain) -> Bool { + if lhs.explainString != rhs.explainString {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.TreeString: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".TreeString" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "tree_string"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.treeString) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.treeString.isEmpty { + try visitor.visitSingularStringField(value: self.treeString, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.TreeString, rhs: Spark_Connect_AnalyzePlanResponse.TreeString) -> Bool { + if lhs.treeString != rhs.treeString {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.IsLocal: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".IsLocal" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "is_local"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.isLocal) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.isLocal != false { + try visitor.visitSingularBoolField(value: self.isLocal, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.IsLocal, rhs: Spark_Connect_AnalyzePlanResponse.IsLocal) -> Bool { + if lhs.isLocal != rhs.isLocal {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.IsStreaming: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".IsStreaming" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "is_streaming"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.isStreaming) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.isStreaming != false { + try visitor.visitSingularBoolField(value: self.isStreaming, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.IsStreaming, rhs: Spark_Connect_AnalyzePlanResponse.IsStreaming) -> Bool { + if lhs.isStreaming != rhs.isStreaming {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.InputFiles: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".InputFiles" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "files"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.files) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.files.isEmpty { + try visitor.visitRepeatedStringField(value: self.files, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.InputFiles, rhs: Spark_Connect_AnalyzePlanResponse.InputFiles) -> Bool { + if lhs.files != rhs.files {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.SparkVersion: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".SparkVersion" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "version"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.version) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.version.isEmpty { + try visitor.visitSingularStringField(value: self.version, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.SparkVersion, rhs: Spark_Connect_AnalyzePlanResponse.SparkVersion) -> Bool { + if lhs.version != rhs.version {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.DDLParse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".DDLParse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "parsed"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._parsed) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._parsed { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.DDLParse, rhs: Spark_Connect_AnalyzePlanResponse.DDLParse) -> Bool { + if lhs._parsed != rhs._parsed {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.SameSemantics: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".SameSemantics" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "result"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.result) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.result != false { + try visitor.visitSingularBoolField(value: self.result, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.SameSemantics, rhs: Spark_Connect_AnalyzePlanResponse.SameSemantics) -> Bool { + if lhs.result != rhs.result {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.SemanticHash: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".SemanticHash" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "result"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.result) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.result != 0 { + try visitor.visitSingularInt32Field(value: self.result, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.SemanticHash, rhs: Spark_Connect_AnalyzePlanResponse.SemanticHash) -> Bool { + if lhs.result != rhs.result {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.Persist: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".Persist" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.Persist, rhs: Spark_Connect_AnalyzePlanResponse.Persist) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.Unpersist: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".Unpersist" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.Unpersist, rhs: Spark_Connect_AnalyzePlanResponse.Unpersist) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.GetStorageLevel: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".GetStorageLevel" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "storage_level"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._storageLevel) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._storageLevel { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.GetStorageLevel, rhs: Spark_Connect_AnalyzePlanResponse.GetStorageLevel) -> Bool { + if lhs._storageLevel != rhs._storageLevel {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AnalyzePlanResponse.JsonToDDL: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AnalyzePlanResponse.protoMessageName + ".JsonToDDL" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "ddl_string"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.ddlString) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.ddlString.isEmpty { + try visitor.visitSingularStringField(value: self.ddlString, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AnalyzePlanResponse.JsonToDDL, rhs: Spark_Connect_AnalyzePlanResponse.JsonToDDL) -> Bool { + if lhs.ddlString != rhs.ddlString {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExecutePlanRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 8: .standard(proto: "client_observed_server_side_session_id"), + 2: .standard(proto: "user_context"), + 6: .standard(proto: "operation_id"), + 3: .same(proto: "plan"), + 4: .standard(proto: "client_type"), + 5: .standard(proto: "request_options"), + 7: .same(proto: "tags"), + ] + + fileprivate class _StorageClass { + var _sessionID: String = String() + var _clientObservedServerSideSessionID: String? = nil + var _userContext: Spark_Connect_UserContext? = nil + var _operationID: String? = nil + var _plan: Spark_Connect_Plan? = nil + var _clientType: String? = nil + var _requestOptions: [Spark_Connect_ExecutePlanRequest.RequestOption] = [] + var _tags: [String] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
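+ // As with AnalyzePlanRequest, this message is backed by a heap-allocated
+ // `_StorageClass` behind copy-on-write, so the struct itself stays one
+ // reference wide and copies remain cheap until the first mutation.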
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _sessionID = source._sessionID + _clientObservedServerSideSessionID = source._clientObservedServerSideSessionID + _userContext = source._userContext + _operationID = source._operationID + _plan = source._plan + _clientType = source._clientType + _requestOptions = source._requestOptions + _tags = source._tags + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &_storage._sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._userContext) }() + case 3: try { try decoder.decodeSingularMessageField(value: &_storage._plan) }() + case 4: try { try decoder.decodeSingularStringField(value: &_storage._clientType) }() + case 5: try { try decoder.decodeRepeatedMessageField(value: &_storage._requestOptions) }() + case 6: try { try decoder.decodeSingularStringField(value: &_storage._operationID) }() + case 7: try { try decoder.decodeRepeatedStringField(value: &_storage._tags) }() + case 8: try { try decoder.decodeSingularStringField(value: &_storage._clientObservedServerSideSessionID) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !_storage._sessionID.isEmpty { + try visitor.visitSingularStringField(value: _storage._sessionID, fieldNumber: 1) + } + try { if let v = _storage._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = _storage._plan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + try { if let v = _storage._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + if !_storage._requestOptions.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._requestOptions, fieldNumber: 5) + } + try { if let v = _storage._operationID { + try visitor.visitSingularStringField(value: v, fieldNumber: 6) + } }() + if !_storage._tags.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._tags, fieldNumber: 7) + } + try { if let v = _storage._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 8) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanRequest, rhs: Spark_Connect_ExecutePlanRequest) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._sessionID != rhs_storage._sessionID {return false} + if _storage._clientObservedServerSideSessionID != rhs_storage._clientObservedServerSideSessionID {return false} + if _storage._userContext != rhs_storage._userContext {return false} + if _storage._operationID != rhs_storage._operationID {return false} + if _storage._plan != rhs_storage._plan {return false} + if _storage._clientType != rhs_storage._clientType {return false} + if _storage._requestOptions != rhs_storage._requestOptions {return false} + if _storage._tags != rhs_storage._tags {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanRequest.RequestOption: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanRequest.protoMessageName + ".RequestOption" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "reattach_options"), + 999: .same(proto: "extension"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_ReattachOptions? + var hadOneofValue = false + if let current = self.requestOption { + hadOneofValue = true + if case .reattachOptions(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.requestOption = .reattachOptions(v) + } + }() + case 999: try { + var v: SwiftProtobuf.Google_Protobuf_Any? 
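+ // Field 999 (`extension`) carries an arbitrary `google.protobuf.Any`,
+ // presumably so plugins can attach request options the schema does not model;
+ // it uses the same oneof bookkeeping as `reattach_options` above.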
+ var hadOneofValue = false + if let current = self.requestOption { + hadOneofValue = true + if case .extension(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.requestOption = .extension(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.requestOption { + case .reattachOptions?: try { + guard case .reattachOptions(let v)? = self.requestOption else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .extension?: try { + guard case .extension(let v)? = self.requestOption else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 999) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanRequest.RequestOption, rhs: Spark_Connect_ExecutePlanRequest.RequestOption) -> Bool { + if lhs.requestOption != rhs.requestOption {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExecutePlanResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 15: .standard(proto: "server_side_session_id"), + 12: .standard(proto: "operation_id"), + 13: .standard(proto: "response_id"), + 2: .standard(proto: "arrow_batch"), + 5: .standard(proto: "sql_command_result"), + 8: .standard(proto: "write_stream_operation_start_result"), + 9: .standard(proto: "streaming_query_command_result"), + 10: .standard(proto: "get_resources_command_result"), + 11: .standard(proto: "streaming_query_manager_command_result"), + 16: .standard(proto: "streaming_query_listener_events_result"), + 14: .standard(proto: "result_complete"), + 17: .standard(proto: "create_resource_profile_command_result"), + 18: .standard(proto: "execution_progress"), + 19: .standard(proto: "checkpoint_command_result"), + 20: .standard(proto: "ml_command_result"), + 999: .same(proto: "extension"), + 4: .same(proto: "metrics"), + 6: .standard(proto: "observed_metrics"), + 7: .same(proto: "schema"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { + var v: Spark_Connect_ExecutePlanResponse.ArrowBatch? 
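+ // `arrow_batch` (field 2) is the row-data payload: responses in the ExecutePlan
+ // result stream deliver query output as Arrow-encoded batches, decoded into the
+ // `responseType` oneof alongside the command results below.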
+ var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .arrowBatch(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .arrowBatch(v) + } + }() + case 4: try { try decoder.decodeSingularMessageField(value: &self._metrics) }() + case 5: try { + var v: Spark_Connect_ExecutePlanResponse.SqlCommandResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .sqlCommandResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .sqlCommandResult(v) + } + }() + case 6: try { try decoder.decodeRepeatedMessageField(value: &self.observedMetrics) }() + case 7: try { try decoder.decodeSingularMessageField(value: &self._schema) }() + case 8: try { + var v: Spark_Connect_WriteStreamOperationStartResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .writeStreamOperationStartResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .writeStreamOperationStartResult(v) + } + }() + case 9: try { + var v: Spark_Connect_StreamingQueryCommandResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .streamingQueryCommandResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .streamingQueryCommandResult(v) + } + }() + case 10: try { + var v: Spark_Connect_GetResourcesCommandResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .getResourcesCommandResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .getResourcesCommandResult(v) + } + }() + case 11: try { + var v: Spark_Connect_StreamingQueryManagerCommandResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .streamingQueryManagerCommandResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .streamingQueryManagerCommandResult(v) + } + }() + case 12: try { try decoder.decodeSingularStringField(value: &self.operationID) }() + case 13: try { try decoder.decodeSingularStringField(value: &self.responseID) }() + case 14: try { + var v: Spark_Connect_ExecutePlanResponse.ResultComplete? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .resultComplete(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .resultComplete(v) + } + }() + case 15: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + case 16: try { + var v: Spark_Connect_StreamingQueryListenerEventsResult? 
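+        // Field 16 belongs to the same `responseType` oneof as fields 2, 5,
+        // 8-11, 14, 17-20 and 999; the bookkeeping below mirrors the earlier
+        // cases.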
+ var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .streamingQueryListenerEventsResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .streamingQueryListenerEventsResult(v) + } + }() + case 17: try { + var v: Spark_Connect_CreateResourceProfileCommandResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .createResourceProfileCommandResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .createResourceProfileCommandResult(v) + } + }() + case 18: try { + var v: Spark_Connect_ExecutePlanResponse.ExecutionProgress? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .executionProgress(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .executionProgress(v) + } + }() + case 19: try { + var v: Spark_Connect_CheckpointCommandResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .checkpointCommandResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .checkpointCommandResult(v) + } + }() + case 20: try { + var v: Spark_Connect_MlCommandResult? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .mlCommandResult(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .mlCommandResult(v) + } + }() + case 999: try { + var v: SwiftProtobuf.Google_Protobuf_Any? + var hadOneofValue = false + if let current = self.responseType { + hadOneofValue = true + if case .extension(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.responseType = .extension(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if case .arrowBatch(let v)? = self.responseType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = self._metrics { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + try { if case .sqlCommandResult(let v)? 
= self.responseType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + } }() + if !self.observedMetrics.isEmpty { + try visitor.visitRepeatedMessageField(value: self.observedMetrics, fieldNumber: 6) + } + try { if let v = self._schema { + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + } }() + switch self.responseType { + case .writeStreamOperationStartResult?: try { + guard case .writeStreamOperationStartResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + }() + case .streamingQueryCommandResult?: try { + guard case .streamingQueryCommandResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case .getResourcesCommandResult?: try { + guard case .getResourcesCommandResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 10) + }() + case .streamingQueryManagerCommandResult?: try { + guard case .streamingQueryManagerCommandResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 11) + }() + default: break + } + if !self.operationID.isEmpty { + try visitor.visitSingularStringField(value: self.operationID, fieldNumber: 12) + } + if !self.responseID.isEmpty { + try visitor.visitSingularStringField(value: self.responseID, fieldNumber: 13) + } + try { if case .resultComplete(let v)? = self.responseType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + } }() + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 15) + } + switch self.responseType { + case .streamingQueryListenerEventsResult?: try { + guard case .streamingQueryListenerEventsResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 16) + }() + case .createResourceProfileCommandResult?: try { + guard case .createResourceProfileCommandResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 17) + }() + case .executionProgress?: try { + guard case .executionProgress(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 18) + }() + case .checkpointCommandResult?: try { + guard case .checkpointCommandResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 19) + }() + case .mlCommandResult?: try { + guard case .mlCommandResult(let v)? = self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 20) + }() + case .extension?: try { + guard case .extension(let v)? 
= self.responseType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 999) + }() + default: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse, rhs: Spark_Connect_ExecutePlanResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.operationID != rhs.operationID {return false} + if lhs.responseID != rhs.responseID {return false} + if lhs.responseType != rhs.responseType {return false} + if lhs._metrics != rhs._metrics {return false} + if lhs.observedMetrics != rhs.observedMetrics {return false} + if lhs._schema != rhs._schema {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.SqlCommandResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.protoMessageName + ".SqlCommandResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "relation"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._relation) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._relation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.SqlCommandResult, rhs: Spark_Connect_ExecutePlanResponse.SqlCommandResult) -> Bool { + if lhs._relation != rhs._relation {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.ArrowBatch: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.protoMessageName + ".ArrowBatch" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "row_count"), + 2: .same(proto: "data"), + 3: .standard(proto: "start_offset"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt64Field(value: &self.rowCount) }() + case 2: try { try decoder.decodeSingularBytesField(value: &self.data) }() + case 3: try { try decoder.decodeSingularInt64Field(value: &self._startOffset) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if self.rowCount != 0 { + try visitor.visitSingularInt64Field(value: self.rowCount, fieldNumber: 1) + } + if !self.data.isEmpty { + try visitor.visitSingularBytesField(value: self.data, fieldNumber: 2) + } + try { if let v = self._startOffset { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 3) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.ArrowBatch, rhs: Spark_Connect_ExecutePlanResponse.ArrowBatch) -> Bool { + if lhs.rowCount != rhs.rowCount {return false} + if lhs.data != rhs.data {return false} + if lhs._startOffset != rhs._startOffset {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.Metrics: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.protoMessageName + ".Metrics" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "metrics"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.metrics) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.metrics.isEmpty { + try visitor.visitRepeatedMessageField(value: self.metrics, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.Metrics, rhs: Spark_Connect_ExecutePlanResponse.Metrics) -> Bool { + if lhs.metrics != rhs.metrics {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.Metrics.MetricObject: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.Metrics.protoMessageName + ".MetricObject" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "name"), + 2: .standard(proto: "plan_id"), + 3: .same(proto: "parent"), + 4: .standard(proto: "execution_metrics"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.planID) }() + case 3: try { try decoder.decodeSingularInt64Field(value: &self.parent) }() + case 4: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: &self.executionMetrics) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 1) + } + if self.planID != 0 { + try visitor.visitSingularInt64Field(value: self.planID, fieldNumber: 2) + } + if self.parent != 0 { + try visitor.visitSingularInt64Field(value: self.parent, fieldNumber: 3) + } + if !self.executionMetrics.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: self.executionMetrics, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.Metrics.MetricObject, rhs: Spark_Connect_ExecutePlanResponse.Metrics.MetricObject) -> Bool { + if lhs.name != rhs.name {return false} + if lhs.planID != rhs.planID {return false} + if lhs.parent != rhs.parent {return false} + if lhs.executionMetrics != rhs.executionMetrics {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.Metrics.MetricValue: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.Metrics.protoMessageName + ".MetricValue" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "name"), + 2: .same(proto: "value"), + 3: .standard(proto: "metric_type"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.value) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.metricType) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 1) + } + if self.value != 0 { + try visitor.visitSingularInt64Field(value: self.value, fieldNumber: 2) + } + if !self.metricType.isEmpty { + try visitor.visitSingularStringField(value: self.metricType, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.Metrics.MetricValue, rhs: Spark_Connect_ExecutePlanResponse.Metrics.MetricValue) -> Bool { + if lhs.name != rhs.name {return false} + if lhs.value != rhs.value {return false} + if lhs.metricType != rhs.metricType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.ObservedMetrics: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.protoMessageName + ".ObservedMetrics" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "name"), + 2: .same(proto: "values"), + 3: .same(proto: "keys"), + 4: .standard(proto: "plan_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.values) }() + case 3: try { try decoder.decodeRepeatedStringField(value: &self.keys) }() + case 4: try { try decoder.decodeSingularInt64Field(value: &self.planID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 1) + } + if !self.values.isEmpty { + try visitor.visitRepeatedMessageField(value: self.values, fieldNumber: 2) + } + if !self.keys.isEmpty { + try visitor.visitRepeatedStringField(value: self.keys, fieldNumber: 3) + } + if self.planID != 0 { + try visitor.visitSingularInt64Field(value: self.planID, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.ObservedMetrics, rhs: Spark_Connect_ExecutePlanResponse.ObservedMetrics) -> Bool { + if lhs.name != rhs.name {return false} + if lhs.values != rhs.values {return false} + if lhs.keys != rhs.keys {return false} + if lhs.planID != rhs.planID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.ResultComplete: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.protoMessageName + ".ResultComplete" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.ResultComplete, rhs: Spark_Connect_ExecutePlanResponse.ResultComplete) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.ExecutionProgress: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.protoMessageName + ".ExecutionProgress" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "stages"), + 2: .standard(proto: "num_inflight_tasks"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.stages) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.numInflightTasks) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.stages.isEmpty { + try visitor.visitRepeatedMessageField(value: self.stages, fieldNumber: 1) + } + if self.numInflightTasks != 0 { + try visitor.visitSingularInt64Field(value: self.numInflightTasks, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.ExecutionProgress, rhs: Spark_Connect_ExecutePlanResponse.ExecutionProgress) -> Bool { + if lhs.stages != rhs.stages {return false} + if lhs.numInflightTasks != rhs.numInflightTasks {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutePlanResponse.ExecutionProgress.StageInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ExecutePlanResponse.ExecutionProgress.protoMessageName + ".StageInfo" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "stage_id"), + 2: .standard(proto: "num_tasks"), + 3: .standard(proto: "num_completed_tasks"), + 4: .standard(proto: "input_bytes_read"), + 5: .same(proto: "done"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt64Field(value: &self.stageID) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.numTasks) }() + case 3: try { try decoder.decodeSingularInt64Field(value: &self.numCompletedTasks) }() + case 4: try { try decoder.decodeSingularInt64Field(value: &self.inputBytesRead) }() + case 5: try { try decoder.decodeSingularBoolField(value: &self.done) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.stageID != 0 { + try visitor.visitSingularInt64Field(value: self.stageID, fieldNumber: 1) + } + if self.numTasks != 0 { + try visitor.visitSingularInt64Field(value: self.numTasks, fieldNumber: 2) + } + if self.numCompletedTasks != 0 { + try visitor.visitSingularInt64Field(value: self.numCompletedTasks, fieldNumber: 3) + } + if self.inputBytesRead != 0 { + try visitor.visitSingularInt64Field(value: self.inputBytesRead, fieldNumber: 4) + } + if self.done != false { + try visitor.visitSingularBoolField(value: self.done, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutePlanResponse.ExecutionProgress.StageInfo, rhs: Spark_Connect_ExecutePlanResponse.ExecutionProgress.StageInfo) -> Bool { + if lhs.stageID != rhs.stageID {return false} + if lhs.numTasks != rhs.numTasks {return false} + if lhs.numCompletedTasks != rhs.numCompletedTasks {return false} + if lhs.inputBytesRead != rhs.inputBytesRead {return false} + if lhs.done != rhs.done {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_KeyValue: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, 
SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".KeyValue" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "key"), + 2: .same(proto: "value"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.key) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._value) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.key.isEmpty { + try visitor.visitSingularStringField(value: self.key, fieldNumber: 1) + } + try { if let v = self._value { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_KeyValue, rhs: Spark_Connect_KeyValue) -> Bool { + if lhs.key != rhs.key {return false} + if lhs._value != rhs._value {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ConfigRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 8: .standard(proto: "client_observed_server_side_session_id"), + 2: .standard(proto: "user_context"), + 3: .same(proto: "operation"), + 4: .standard(proto: "client_type"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { try decoder.decodeSingularMessageField(value: &self._operation) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 8: try { try decoder.decodeSingularStringField(value: &self._clientObservedServerSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = self._operation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try { if let v = self._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 8) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest, rhs: Spark_Connect_ConfigRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._clientObservedServerSideSessionID != rhs._clientObservedServerSideSessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs._operation != rhs._operation {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.Operation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".Operation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "set"), + 2: .same(proto: "get"), + 3: .standard(proto: "get_with_default"), + 4: .standard(proto: "get_option"), + 5: .standard(proto: "get_all"), + 6: .same(proto: "unset"), + 7: .standard(proto: "is_modifiable"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_ConfigRequest.Set? + var hadOneofValue = false + if let current = self.opType { + hadOneofValue = true + if case .set(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.opType = .set(v) + } + }() + case 2: try { + var v: Spark_Connect_ConfigRequest.Get? + var hadOneofValue = false + if let current = self.opType { + hadOneofValue = true + if case .get(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.opType = .get(v) + } + }() + case 3: try { + var v: Spark_Connect_ConfigRequest.GetWithDefault? + var hadOneofValue = false + if let current = self.opType { + hadOneofValue = true + if case .getWithDefault(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.opType = .getWithDefault(v) + } + }() + case 4: try { + var v: Spark_Connect_ConfigRequest.GetOption? 
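+        // As a sketch (accessor names assumed from protoc-gen-swift's usual
+        // conventions, not shown in this file), a client would populate this
+        // oneof through the generated convenience property:
+        //   var op = Spark_Connect_ConfigRequest.Operation()
+        //   op.getOption = .with { $0.keys = ["spark.sql.ansi.enabled"] }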
+ var hadOneofValue = false + if let current = self.opType { + hadOneofValue = true + if case .getOption(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.opType = .getOption(v) + } + }() + case 5: try { + var v: Spark_Connect_ConfigRequest.GetAll? + var hadOneofValue = false + if let current = self.opType { + hadOneofValue = true + if case .getAll(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.opType = .getAll(v) + } + }() + case 6: try { + var v: Spark_Connect_ConfigRequest.Unset? + var hadOneofValue = false + if let current = self.opType { + hadOneofValue = true + if case .unset(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.opType = .unset(v) + } + }() + case 7: try { + var v: Spark_Connect_ConfigRequest.IsModifiable? + var hadOneofValue = false + if let current = self.opType { + hadOneofValue = true + if case .isModifiable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.opType = .isModifiable(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.opType { + case .set?: try { + guard case .set(let v)? = self.opType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .get?: try { + guard case .get(let v)? = self.opType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .getWithDefault?: try { + guard case .getWithDefault(let v)? = self.opType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .getOption?: try { + guard case .getOption(let v)? = self.opType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .getAll?: try { + guard case .getAll(let v)? = self.opType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .unset?: try { + guard case .unset(let v)? = self.opType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .isModifiable?: try { + guard case .isModifiable(let v)? 
= self.opType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.Operation, rhs: Spark_Connect_ConfigRequest.Operation) -> Bool { + if lhs.opType != rhs.opType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.Set: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".Set" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "pairs"), + 2: .same(proto: "silent"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.pairs) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self._silent) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.pairs.isEmpty { + try visitor.visitRepeatedMessageField(value: self.pairs, fieldNumber: 1) + } + try { if let v = self._silent { + try visitor.visitSingularBoolField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.Set, rhs: Spark_Connect_ConfigRequest.Set) -> Bool { + if lhs.pairs != rhs.pairs {return false} + if lhs._silent != rhs._silent {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.Get: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".Get" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "keys"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.keys) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.keys.isEmpty { + try visitor.visitRepeatedStringField(value: self.keys, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.Get, rhs: Spark_Connect_ConfigRequest.Get) -> Bool { + if lhs.keys != rhs.keys {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.GetWithDefault: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".GetWithDefault" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "pairs"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.pairs) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.pairs.isEmpty { + try visitor.visitRepeatedMessageField(value: self.pairs, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.GetWithDefault, rhs: Spark_Connect_ConfigRequest.GetWithDefault) -> Bool { + if lhs.pairs != rhs.pairs {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.GetOption: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".GetOption" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "keys"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.keys) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.keys.isEmpty { + try visitor.visitRepeatedStringField(value: self.keys, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.GetOption, rhs: Spark_Connect_ConfigRequest.GetOption) -> Bool { + if lhs.keys != rhs.keys {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.GetAll: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".GetAll" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "prefix"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._prefix) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._prefix { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.GetAll, rhs: Spark_Connect_ConfigRequest.GetAll) -> Bool { + if lhs._prefix != rhs._prefix {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.Unset: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".Unset" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "keys"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.keys) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.keys.isEmpty { + try visitor.visitRepeatedStringField(value: self.keys, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.Unset, rhs: Spark_Connect_ConfigRequest.Unset) -> Bool { + if lhs.keys != rhs.keys {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigRequest.IsModifiable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ConfigRequest.protoMessageName + ".IsModifiable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "keys"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.keys) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.keys.isEmpty { + try visitor.visitRepeatedStringField(value: self.keys, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigRequest.IsModifiable, rhs: Spark_Connect_ConfigRequest.IsModifiable) -> Bool { + if lhs.keys != rhs.keys {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ConfigResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ConfigResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 4: .standard(proto: "server_side_session_id"), + 2: .same(proto: "pairs"), + 3: .same(proto: "warnings"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.pairs) }() + case 3: try { try decoder.decodeRepeatedStringField(value: &self.warnings) }() + case 4: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + if !self.pairs.isEmpty { + try visitor.visitRepeatedMessageField(value: self.pairs, fieldNumber: 2) + } + if !self.warnings.isEmpty { + try visitor.visitRepeatedStringField(value: self.warnings, fieldNumber: 3) + } + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ConfigResponse, rhs: Spark_Connect_ConfigResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.pairs != rhs.pairs {return false} + if lhs.warnings != rhs.warnings {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AddArtifactsRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".AddArtifactsRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 2: .standard(proto: "user_context"), + 7: .standard(proto: "client_observed_server_side_session_id"), + 6: .standard(proto: "client_type"), + 3: .same(proto: "batch"), + 4: .standard(proto: "begin_chunk"), + 5: .same(proto: "chunk"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { + var v: Spark_Connect_AddArtifactsRequest.Batch? + var hadOneofValue = false + if let current = self.payload { + hadOneofValue = true + if case .batch(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.payload = .batch(v) + } + }() + case 4: try { + var v: Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact? + var hadOneofValue = false + if let current = self.payload { + hadOneofValue = true + if case .beginChunk(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.payload = .beginChunk(v) + } + }() + case 5: try { + var v: Spark_Connect_AddArtifactsRequest.ArtifactChunk? 
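+        // `payload` is a oneof of batch (3), begin_chunk (4) and chunk (5). A
+        // chunked-upload sketch, using only field names defined in this file:
+        // send one .beginChunk carrying name/total_bytes/num_chunks plus the
+        // initial_chunk, then follow with .chunk messages whose data and crc
+        // cover the remaining bytes.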
+ var hadOneofValue = false + if let current = self.payload { + hadOneofValue = true + if case .chunk(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.payload = .chunk(v) + } + }() + case 6: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 7: try { try decoder.decodeSingularStringField(value: &self._clientObservedServerSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + switch self.payload { + case .batch?: try { + guard case .batch(let v)? = self.payload else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .beginChunk?: try { + guard case .beginChunk(let v)? = self.payload else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .chunk?: try { + guard case .chunk(let v)? = self.payload else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case nil: break + } + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 6) + } }() + try { if let v = self._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 7) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AddArtifactsRequest, rhs: Spark_Connect_AddArtifactsRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs._clientObservedServerSideSessionID != rhs._clientObservedServerSideSessionID {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs.payload != rhs.payload {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AddArtifactsRequest.ArtifactChunk: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AddArtifactsRequest.protoMessageName + ".ArtifactChunk" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "data"), + 2: .same(proto: "crc"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBytesField(value: &self.data) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.crc) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.data.isEmpty { + try visitor.visitSingularBytesField(value: self.data, fieldNumber: 1) + } + if self.crc != 0 { + try visitor.visitSingularInt64Field(value: self.crc, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AddArtifactsRequest.ArtifactChunk, rhs: Spark_Connect_AddArtifactsRequest.ArtifactChunk) -> Bool { + if lhs.data != rhs.data {return false} + if lhs.crc != rhs.crc {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AddArtifactsRequest.SingleChunkArtifact: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AddArtifactsRequest.protoMessageName + ".SingleChunkArtifact" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "name"), + 2: .same(proto: "data"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._data) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 1) + } + try { if let v = self._data { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AddArtifactsRequest.SingleChunkArtifact, rhs: Spark_Connect_AddArtifactsRequest.SingleChunkArtifact) -> Bool { + if lhs.name != rhs.name {return false} + if lhs._data != rhs._data {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AddArtifactsRequest.Batch: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AddArtifactsRequest.protoMessageName + ".Batch" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "artifacts"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.artifacts) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.artifacts.isEmpty { + try visitor.visitRepeatedMessageField(value: self.artifacts, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AddArtifactsRequest.Batch, rhs: Spark_Connect_AddArtifactsRequest.Batch) -> Bool { + if lhs.artifacts != rhs.artifacts {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AddArtifactsRequest.protoMessageName + ".BeginChunkedArtifact" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "name"), + 2: .standard(proto: "total_bytes"), + 3: .standard(proto: "num_chunks"), + 4: .standard(proto: "initial_chunk"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.totalBytes) }() + case 3: try { try decoder.decodeSingularInt64Field(value: &self.numChunks) }() + case 4: try { try decoder.decodeSingularMessageField(value: &self._initialChunk) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 1) + } + if self.totalBytes != 0 { + try visitor.visitSingularInt64Field(value: self.totalBytes, fieldNumber: 2) + } + if self.numChunks != 0 { + try visitor.visitSingularInt64Field(value: self.numChunks, fieldNumber: 3) + } + try { if let v = self._initialChunk { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact, rhs: Spark_Connect_AddArtifactsRequest.BeginChunkedArtifact) -> Bool { + if lhs.name != rhs.name {return false} + if lhs.totalBytes != rhs.totalBytes {return false} + if lhs.numChunks != rhs.numChunks {return false} + if lhs._initialChunk != rhs._initialChunk {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AddArtifactsResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".AddArtifactsResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 2: .standard(proto: "session_id"), + 3: .standard(proto: "server_side_session_id"), + 1: .same(proto: "artifacts"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.artifacts) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.artifacts.isEmpty { + try visitor.visitRepeatedMessageField(value: self.artifacts, fieldNumber: 1) + } + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 2) + } + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AddArtifactsResponse, rhs: Spark_Connect_AddArtifactsResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.artifacts != rhs.artifacts {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_AddArtifactsResponse.ArtifactSummary: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_AddArtifactsResponse.protoMessageName + ".ArtifactSummary" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "name"), + 2: .standard(proto: "is_crc_successful"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for 
every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self.isCrcSuccessful) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 1) + } + if self.isCrcSuccessful != false { + try visitor.visitSingularBoolField(value: self.isCrcSuccessful, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AddArtifactsResponse.ArtifactSummary, rhs: Spark_Connect_AddArtifactsResponse.ArtifactSummary) -> Bool { + if lhs.name != rhs.name {return false} + if lhs.isCrcSuccessful != rhs.isCrcSuccessful {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ArtifactStatusesRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ArtifactStatusesRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 5: .standard(proto: "client_observed_server_side_session_id"), + 2: .standard(proto: "user_context"), + 3: .standard(proto: "client_type"), + 4: .same(proto: "names"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 4: try { try decoder.decodeRepeatedStringField(value: &self.names) }() + case 5: try { try decoder.decodeSingularStringField(value: &self._clientObservedServerSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + if !self.names.isEmpty { + try visitor.visitRepeatedStringField(value: self.names, fieldNumber: 4) + } + try { if let v = self._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 5) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ArtifactStatusesRequest, rhs: Spark_Connect_ArtifactStatusesRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._clientObservedServerSideSessionID != rhs._clientObservedServerSideSessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs.names != rhs.names {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ArtifactStatusesResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ArtifactStatusesResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 2: .standard(proto: "session_id"), + 3: .standard(proto: "server_side_session_id"), + 1: .same(proto: "statuses"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: &self.statuses) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.statuses.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: self.statuses, fieldNumber: 1) + } + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 2) + } + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ArtifactStatusesResponse, rhs: Spark_Connect_ArtifactStatusesResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.statuses != rhs.statuses {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ArtifactStatusesResponse.ArtifactStatus: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ArtifactStatusesResponse.protoMessageName + ".ArtifactStatus" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "exists"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.exists) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.exists != false { + try visitor.visitSingularBoolField(value: self.exists, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ArtifactStatusesResponse.ArtifactStatus, rhs: Spark_Connect_ArtifactStatusesResponse.ArtifactStatus) -> Bool { + if lhs.exists != rhs.exists {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_InterruptRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".InterruptRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 7: .standard(proto: "client_observed_server_side_session_id"), + 2: .standard(proto: "user_context"), + 3: .standard(proto: "client_type"), + 4: .standard(proto: "interrupt_type"), + 5: .standard(proto: "operation_tag"), + 6: .standard(proto: "operation_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 4: try { try decoder.decodeSingularEnumField(value: &self.interruptType) }() + case 5: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if self.interrupt != nil {try decoder.handleConflictingOneOf()} + self.interrupt = .operationTag(v) + } + }() + case 6: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if self.interrupt != nil {try decoder.handleConflictingOneOf()} + self.interrupt = .operationID(v) + } + }() + case 7: try { try decoder.decodeSingularStringField(value: &self._clientObservedServerSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + if self.interruptType != .unspecified { + try visitor.visitSingularEnumField(value: self.interruptType, fieldNumber: 4) + } + switch self.interrupt { + case .operationTag?: try { + guard case .operationTag(let v)? = self.interrupt else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 5) + }() + case .operationID?: try { + guard case .operationID(let v)? 
= self.interrupt else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 6) + }() + case nil: break + } + try { if let v = self._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 7) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_InterruptRequest, rhs: Spark_Connect_InterruptRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._clientObservedServerSideSessionID != rhs._clientObservedServerSideSessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs.interruptType != rhs.interruptType {return false} + if lhs.interrupt != rhs.interrupt {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_InterruptRequest.InterruptType: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "INTERRUPT_TYPE_UNSPECIFIED"), + 1: .same(proto: "INTERRUPT_TYPE_ALL"), + 2: .same(proto: "INTERRUPT_TYPE_TAG"), + 3: .same(proto: "INTERRUPT_TYPE_OPERATION_ID"), + ] +} + +extension Spark_Connect_InterruptResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".InterruptResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 3: .standard(proto: "server_side_session_id"), + 2: .standard(proto: "interrupted_ids"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &self.interruptedIds) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + if !self.interruptedIds.isEmpty { + try visitor.visitRepeatedStringField(value: self.interruptedIds, fieldNumber: 2) + } + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_InterruptResponse, rhs: Spark_Connect_InterruptResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.interruptedIds != rhs.interruptedIds {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReattachOptions: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ReattachOptions" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "reattachable"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.reattachable) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.reattachable != false { + try visitor.visitSingularBoolField(value: self.reattachable, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReattachOptions, rhs: Spark_Connect_ReattachOptions) -> Bool { + if lhs.reattachable != rhs.reattachable {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReattachExecuteRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ReattachExecuteRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 6: .standard(proto: "client_observed_server_side_session_id"), + 2: .standard(proto: "user_context"), + 3: .standard(proto: "operation_id"), + 4: .standard(proto: "client_type"), + 5: .standard(proto: "last_response_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.operationID) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 5: try { try decoder.decodeSingularStringField(value: &self._lastResponseID) }() + case 6: try { try decoder.decodeSingularStringField(value: &self._clientObservedServerSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if !self.operationID.isEmpty { + try visitor.visitSingularStringField(value: self.operationID, fieldNumber: 3) + } + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try { if let v = self._lastResponseID { + try visitor.visitSingularStringField(value: v, fieldNumber: 5) + } }() + try { if let v = self._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 6) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReattachExecuteRequest, rhs: Spark_Connect_ReattachExecuteRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._clientObservedServerSideSessionID != rhs._clientObservedServerSideSessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs.operationID != rhs.operationID {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs._lastResponseID != rhs._lastResponseID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReleaseExecuteRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ReleaseExecuteRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 7: .standard(proto: "client_observed_server_side_session_id"), + 2: .standard(proto: "user_context"), + 3: .standard(proto: "operation_id"), + 4: .standard(proto: "client_type"), + 5: .standard(proto: "release_all"), + 6: .standard(proto: "release_until"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.operationID) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 5: try { + var v: Spark_Connect_ReleaseExecuteRequest.ReleaseAll? + var hadOneofValue = false + if let current = self.release { + hadOneofValue = true + if case .releaseAll(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.release = .releaseAll(v) + } + }() + case 6: try { + var v: Spark_Connect_ReleaseExecuteRequest.ReleaseUntil? + var hadOneofValue = false + if let current = self.release { + hadOneofValue = true + if case .releaseUntil(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.release = .releaseUntil(v) + } + }() + case 7: try { try decoder.decodeSingularStringField(value: &self._clientObservedServerSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if !self.operationID.isEmpty { + try visitor.visitSingularStringField(value: self.operationID, fieldNumber: 3) + } + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + switch self.release { + case .releaseAll?: try { + guard case .releaseAll(let v)? = self.release else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .releaseUntil?: try { + guard case .releaseUntil(let v)? 
= self.release else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case nil: break + } + try { if let v = self._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 7) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReleaseExecuteRequest, rhs: Spark_Connect_ReleaseExecuteRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._clientObservedServerSideSessionID != rhs._clientObservedServerSideSessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs.operationID != rhs.operationID {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs.release != rhs.release {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReleaseExecuteRequest.ReleaseAll: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ReleaseExecuteRequest.protoMessageName + ".ReleaseAll" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReleaseExecuteRequest.ReleaseAll, rhs: Spark_Connect_ReleaseExecuteRequest.ReleaseAll) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReleaseExecuteRequest.ReleaseUntil: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_ReleaseExecuteRequest.protoMessageName + ".ReleaseUntil" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "response_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.responseID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.responseID.isEmpty { + try visitor.visitSingularStringField(value: self.responseID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReleaseExecuteRequest.ReleaseUntil, rhs: Spark_Connect_ReleaseExecuteRequest.ReleaseUntil) -> Bool { + if lhs.responseID != rhs.responseID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReleaseExecuteResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ReleaseExecuteResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 3: .standard(proto: "server_side_session_id"), + 2: .standard(proto: "operation_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._operationID) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._operationID { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReleaseExecuteResponse, rhs: Spark_Connect_ReleaseExecuteResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs._operationID != rhs._operationID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReleaseSessionRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ReleaseSessionRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 2: .standard(proto: "user_context"), + 3: .standard(proto: "client_type"), + 4: .standard(proto: "allow_reconnect"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 4: try { try decoder.decodeSingularBoolField(value: &self.allowReconnect) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + if self.allowReconnect != false { + try visitor.visitSingularBoolField(value: self.allowReconnect, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReleaseSessionRequest, rhs: Spark_Connect_ReleaseSessionRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs.allowReconnect != rhs.allowReconnect {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ReleaseSessionResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ReleaseSessionResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 2: .standard(proto: "server_side_session_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ReleaseSessionResponse, rhs: Spark_Connect_ReleaseSessionResponse) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_FetchErrorDetailsRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".FetchErrorDetailsRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "session_id"), + 5: .standard(proto: "client_observed_server_side_session_id"), + 2: .standard(proto: "user_context"), + 3: .standard(proto: "error_id"), + 4: .standard(proto: "client_type"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._userContext) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.errorID) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._clientType) }() + case 5: try { try decoder.decodeSingularStringField(value: &self._clientObservedServerSideSessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 1) + } + try { if let v = self._userContext { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if !self.errorID.isEmpty { + try visitor.visitSingularStringField(value: self.errorID, fieldNumber: 3) + } + try { if let v = self._clientType { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try { if let v = self._clientObservedServerSideSessionID { + try visitor.visitSingularStringField(value: v, fieldNumber: 5) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_FetchErrorDetailsRequest, rhs: Spark_Connect_FetchErrorDetailsRequest) -> Bool { + if lhs.sessionID != rhs.sessionID {return false} + if lhs._clientObservedServerSideSessionID != rhs._clientObservedServerSideSessionID {return false} + if lhs._userContext != rhs._userContext {return false} + if lhs.errorID != rhs.errorID {return false} + if lhs._clientType != rhs._clientType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_FetchErrorDetailsResponse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".FetchErrorDetailsResponse" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 3: .standard(proto: "server_side_session_id"), + 4: .standard(proto: "session_id"), + 1: .standard(proto: "root_error_idx"), + 2: .same(proto: "errors"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self._rootErrorIdx) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.errors) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.serverSideSessionID) }() + case 4: try { try decoder.decodeSingularStringField(value: &self.sessionID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._rootErrorIdx { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 1) + } }() + if !self.errors.isEmpty { + try visitor.visitRepeatedMessageField(value: self.errors, fieldNumber: 2) + } + if !self.serverSideSessionID.isEmpty { + try visitor.visitSingularStringField(value: self.serverSideSessionID, fieldNumber: 3) + } + if !self.sessionID.isEmpty { + try visitor.visitSingularStringField(value: self.sessionID, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_FetchErrorDetailsResponse, rhs: Spark_Connect_FetchErrorDetailsResponse) -> Bool { + if lhs.serverSideSessionID != rhs.serverSideSessionID {return false} + if lhs.sessionID != rhs.sessionID {return false} + if lhs._rootErrorIdx != rhs._rootErrorIdx {return false} + if lhs.errors != rhs.errors {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_FetchErrorDetailsResponse.StackTraceElement: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_FetchErrorDetailsResponse.protoMessageName + ".StackTraceElement" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "declaring_class"), + 2: .standard(proto: "method_name"), + 3: .standard(proto: "file_name"), + 4: .standard(proto: "line_number"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.declaringClass) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.methodName) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._fileName) }() + case 4: try { try decoder.decodeSingularInt32Field(value: &self.lineNumber) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.declaringClass.isEmpty { + try visitor.visitSingularStringField(value: self.declaringClass, fieldNumber: 1) + } + if !self.methodName.isEmpty { + try visitor.visitSingularStringField(value: self.methodName, fieldNumber: 2) + } + try { if let v = self._fileName { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + if self.lineNumber != 0 { + try visitor.visitSingularInt32Field(value: self.lineNumber, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_FetchErrorDetailsResponse.StackTraceElement, rhs: Spark_Connect_FetchErrorDetailsResponse.StackTraceElement) -> Bool { + if lhs.declaringClass != rhs.declaringClass {return false} + if lhs.methodName != rhs.methodName {return false} + if lhs._fileName != rhs._fileName {return false} + if lhs.lineNumber != rhs.lineNumber {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_FetchErrorDetailsResponse.QueryContext: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_FetchErrorDetailsResponse.protoMessageName + ".QueryContext" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 10: .standard(proto: "context_type"), + 1: .standard(proto: "object_type"), + 2: .standard(proto: "object_name"), + 3: .standard(proto: "start_index"), + 4: .standard(proto: "stop_index"), + 5: .same(proto: "fragment"), + 6: .standard(proto: "call_site"), + 7: .same(proto: "summary"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.objectType) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.objectName) }() + case 3: try { try decoder.decodeSingularInt32Field(value: &self.startIndex) }() + case 4: try { try decoder.decodeSingularInt32Field(value: &self.stopIndex) }() + case 5: try { try decoder.decodeSingularStringField(value: &self.fragment) }() + case 6: try { try decoder.decodeSingularStringField(value: &self.callSite) }() + case 7: try { try decoder.decodeSingularStringField(value: &self.summary) }() + case 10: try { try decoder.decodeSingularEnumField(value: &self.contextType) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.objectType.isEmpty { + try visitor.visitSingularStringField(value: self.objectType, fieldNumber: 1) + } + if !self.objectName.isEmpty { + try visitor.visitSingularStringField(value: self.objectName, fieldNumber: 2) + } + if self.startIndex != 0 { + try visitor.visitSingularInt32Field(value: self.startIndex, fieldNumber: 3) + } + if self.stopIndex != 0 { + try visitor.visitSingularInt32Field(value: self.stopIndex, fieldNumber: 4) + } + if !self.fragment.isEmpty { + try visitor.visitSingularStringField(value: self.fragment, fieldNumber: 5) + } + if !self.callSite.isEmpty { + try visitor.visitSingularStringField(value: self.callSite, fieldNumber: 6) + } + if !self.summary.isEmpty { + try visitor.visitSingularStringField(value: self.summary, fieldNumber: 7) + } + if self.contextType != .sql { + try visitor.visitSingularEnumField(value: self.contextType, fieldNumber: 10) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_FetchErrorDetailsResponse.QueryContext, rhs: Spark_Connect_FetchErrorDetailsResponse.QueryContext) -> Bool { + if lhs.contextType != rhs.contextType {return false} + if lhs.objectType != rhs.objectType {return false} + if lhs.objectName != rhs.objectName {return false} + if lhs.startIndex != rhs.startIndex {return false} + if lhs.stopIndex != rhs.stopIndex {return false} + if lhs.fragment != rhs.fragment {return false} + if lhs.callSite != rhs.callSite {return false} + if lhs.summary != rhs.summary {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_FetchErrorDetailsResponse.QueryContext.ContextType: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SQL"), + 1: .same(proto: "DATAFRAME"), + ] +} + +extension Spark_Connect_FetchErrorDetailsResponse.SparkThrowable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_FetchErrorDetailsResponse.protoMessageName + ".SparkThrowable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "error_class"), + 2: .standard(proto: "message_parameters"), + 3: .standard(proto: "query_contexts"), + 4: .standard(proto: "sql_state"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._errorClass) }() + case 2: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.messageParameters) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &self.queryContexts) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._sqlState) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._errorClass { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + if !self.messageParameters.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.messageParameters, fieldNumber: 2) + } + if !self.queryContexts.isEmpty { + try visitor.visitRepeatedMessageField(value: self.queryContexts, fieldNumber: 3) + } + try { if let v = self._sqlState { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_FetchErrorDetailsResponse.SparkThrowable, rhs: Spark_Connect_FetchErrorDetailsResponse.SparkThrowable) -> Bool { + if lhs._errorClass != rhs._errorClass {return false} + if lhs.messageParameters != rhs.messageParameters {return false} + if lhs.queryContexts != rhs.queryContexts {return false} + if lhs._sqlState != rhs._sqlState {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_FetchErrorDetailsResponse.Error: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_FetchErrorDetailsResponse.protoMessageName + ".Error" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "error_type_hierarchy"), + 2: .same(proto: "message"), + 3: .standard(proto: "stack_trace"), + 4: .standard(proto: "cause_idx"), + 5: .standard(proto: "spark_throwable"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.errorTypeHierarchy) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.message) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &self.stackTrace) }() + case 4: try { try decoder.decodeSingularInt32Field(value: &self._causeIdx) }() + case 5: try { try decoder.decodeSingularMessageField(value: &self._sparkThrowable) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.errorTypeHierarchy.isEmpty { + try visitor.visitRepeatedStringField(value: self.errorTypeHierarchy, fieldNumber: 1) + } + if !self.message.isEmpty { + try visitor.visitSingularStringField(value: self.message, fieldNumber: 2) + } + if !self.stackTrace.isEmpty { + try visitor.visitRepeatedMessageField(value: self.stackTrace, fieldNumber: 3) + } + try { if let v = self._causeIdx { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 4) + } }() + try { if let v = self._sparkThrowable { + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_FetchErrorDetailsResponse.Error, rhs: Spark_Connect_FetchErrorDetailsResponse.Error) -> Bool { + if lhs.errorTypeHierarchy != rhs.errorTypeHierarchy {return false} + if lhs.message != rhs.message {return false} + if lhs.stackTrace != rhs.stackTrace {return false} + if lhs._causeIdx != rhs._causeIdx {return false} + if lhs._sparkThrowable != rhs._sparkThrowable {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CheckpointCommandResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CheckpointCommandResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "relation"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._relation) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._relation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CheckpointCommandResult, rhs: Spark_Connect_CheckpointCommandResult) -> Bool { + if lhs._relation != rhs._relation {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Sources/SparkConnect/catalog.grpc.swift b/Sources/SparkConnect/catalog.grpc.swift new file mode 100644 index 0000000..054efa7 --- /dev/null +++ b/Sources/SparkConnect/catalog.grpc.swift @@ -0,0 +1,26 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/catalog.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/catalog.pb.swift b/Sources/SparkConnect/catalog.pb.swift new file mode 100644 index 0000000..24f19c7 --- /dev/null +++ b/Sources/SparkConnect/catalog.pb.swift @@ -0,0 +1,2309 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/catalog.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +/// Catalog messages are marked as unstable. +struct Spark_Connect_Catalog: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var catType: Spark_Connect_Catalog.OneOf_CatType? = nil + + var currentDatabase: Spark_Connect_CurrentDatabase { + get { + if case .currentDatabase(let v)? = catType {return v} + return Spark_Connect_CurrentDatabase() + } + set {catType = .currentDatabase(newValue)} + } + + var setCurrentDatabase: Spark_Connect_SetCurrentDatabase { + get { + if case .setCurrentDatabase(let v)? = catType {return v} + return Spark_Connect_SetCurrentDatabase() + } + set {catType = .setCurrentDatabase(newValue)} + } + + var listDatabases: Spark_Connect_ListDatabases { + get { + if case .listDatabases(let v)? 
= catType {return v} + return Spark_Connect_ListDatabases() + } + set {catType = .listDatabases(newValue)} + } + + var listTables: Spark_Connect_ListTables { + get { + if case .listTables(let v)? = catType {return v} + return Spark_Connect_ListTables() + } + set {catType = .listTables(newValue)} + } + + var listFunctions: Spark_Connect_ListFunctions { + get { + if case .listFunctions(let v)? = catType {return v} + return Spark_Connect_ListFunctions() + } + set {catType = .listFunctions(newValue)} + } + + var listColumns: Spark_Connect_ListColumns { + get { + if case .listColumns(let v)? = catType {return v} + return Spark_Connect_ListColumns() + } + set {catType = .listColumns(newValue)} + } + + var getDatabase: Spark_Connect_GetDatabase { + get { + if case .getDatabase(let v)? = catType {return v} + return Spark_Connect_GetDatabase() + } + set {catType = .getDatabase(newValue)} + } + + var getTable: Spark_Connect_GetTable { + get { + if case .getTable(let v)? = catType {return v} + return Spark_Connect_GetTable() + } + set {catType = .getTable(newValue)} + } + + var getFunction: Spark_Connect_GetFunction { + get { + if case .getFunction(let v)? = catType {return v} + return Spark_Connect_GetFunction() + } + set {catType = .getFunction(newValue)} + } + + var databaseExists: Spark_Connect_DatabaseExists { + get { + if case .databaseExists(let v)? = catType {return v} + return Spark_Connect_DatabaseExists() + } + set {catType = .databaseExists(newValue)} + } + + var tableExists: Spark_Connect_TableExists { + get { + if case .tableExists(let v)? = catType {return v} + return Spark_Connect_TableExists() + } + set {catType = .tableExists(newValue)} + } + + var functionExists: Spark_Connect_FunctionExists { + get { + if case .functionExists(let v)? = catType {return v} + return Spark_Connect_FunctionExists() + } + set {catType = .functionExists(newValue)} + } + + var createExternalTable: Spark_Connect_CreateExternalTable { + get { + if case .createExternalTable(let v)? = catType {return v} + return Spark_Connect_CreateExternalTable() + } + set {catType = .createExternalTable(newValue)} + } + + var createTable: Spark_Connect_CreateTable { + get { + if case .createTable(let v)? = catType {return v} + return Spark_Connect_CreateTable() + } + set {catType = .createTable(newValue)} + } + + var dropTempView: Spark_Connect_DropTempView { + get { + if case .dropTempView(let v)? = catType {return v} + return Spark_Connect_DropTempView() + } + set {catType = .dropTempView(newValue)} + } + + var dropGlobalTempView: Spark_Connect_DropGlobalTempView { + get { + if case .dropGlobalTempView(let v)? = catType {return v} + return Spark_Connect_DropGlobalTempView() + } + set {catType = .dropGlobalTempView(newValue)} + } + + var recoverPartitions: Spark_Connect_RecoverPartitions { + get { + if case .recoverPartitions(let v)? = catType {return v} + return Spark_Connect_RecoverPartitions() + } + set {catType = .recoverPartitions(newValue)} + } + + var isCached: Spark_Connect_IsCached { + get { + if case .isCached(let v)? = catType {return v} + return Spark_Connect_IsCached() + } + set {catType = .isCached(newValue)} + } + + var cacheTable: Spark_Connect_CacheTable { + get { + if case .cacheTable(let v)? = catType {return v} + return Spark_Connect_CacheTable() + } + set {catType = .cacheTable(newValue)} + } + + var uncacheTable: Spark_Connect_UncacheTable { + get { + if case .uncacheTable(let v)? 
= catType {return v} + return Spark_Connect_UncacheTable() + } + set {catType = .uncacheTable(newValue)} + } + + var clearCache_p: Spark_Connect_ClearCache { + get { + if case .clearCache_p(let v)? = catType {return v} + return Spark_Connect_ClearCache() + } + set {catType = .clearCache_p(newValue)} + } + + var refreshTable: Spark_Connect_RefreshTable { + get { + if case .refreshTable(let v)? = catType {return v} + return Spark_Connect_RefreshTable() + } + set {catType = .refreshTable(newValue)} + } + + var refreshByPath: Spark_Connect_RefreshByPath { + get { + if case .refreshByPath(let v)? = catType {return v} + return Spark_Connect_RefreshByPath() + } + set {catType = .refreshByPath(newValue)} + } + + var currentCatalog: Spark_Connect_CurrentCatalog { + get { + if case .currentCatalog(let v)? = catType {return v} + return Spark_Connect_CurrentCatalog() + } + set {catType = .currentCatalog(newValue)} + } + + var setCurrentCatalog: Spark_Connect_SetCurrentCatalog { + get { + if case .setCurrentCatalog(let v)? = catType {return v} + return Spark_Connect_SetCurrentCatalog() + } + set {catType = .setCurrentCatalog(newValue)} + } + + var listCatalogs: Spark_Connect_ListCatalogs { + get { + if case .listCatalogs(let v)? = catType {return v} + return Spark_Connect_ListCatalogs() + } + set {catType = .listCatalogs(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_CatType: Equatable, Sendable { + case currentDatabase(Spark_Connect_CurrentDatabase) + case setCurrentDatabase(Spark_Connect_SetCurrentDatabase) + case listDatabases(Spark_Connect_ListDatabases) + case listTables(Spark_Connect_ListTables) + case listFunctions(Spark_Connect_ListFunctions) + case listColumns(Spark_Connect_ListColumns) + case getDatabase(Spark_Connect_GetDatabase) + case getTable(Spark_Connect_GetTable) + case getFunction(Spark_Connect_GetFunction) + case databaseExists(Spark_Connect_DatabaseExists) + case tableExists(Spark_Connect_TableExists) + case functionExists(Spark_Connect_FunctionExists) + case createExternalTable(Spark_Connect_CreateExternalTable) + case createTable(Spark_Connect_CreateTable) + case dropTempView(Spark_Connect_DropTempView) + case dropGlobalTempView(Spark_Connect_DropGlobalTempView) + case recoverPartitions(Spark_Connect_RecoverPartitions) + case isCached(Spark_Connect_IsCached) + case cacheTable(Spark_Connect_CacheTable) + case uncacheTable(Spark_Connect_UncacheTable) + case clearCache_p(Spark_Connect_ClearCache) + case refreshTable(Spark_Connect_RefreshTable) + case refreshByPath(Spark_Connect_RefreshByPath) + case currentCatalog(Spark_Connect_CurrentCatalog) + case setCurrentCatalog(Spark_Connect_SetCurrentCatalog) + case listCatalogs(Spark_Connect_ListCatalogs) + + } + + init() {} +} + +/// See `spark.catalog.currentDatabase` +struct Spark_Connect_CurrentDatabase: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.setCurrentDatabase` +struct Spark_Connect_SetCurrentDatabase: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + /// (Required) + var dbName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.listDatabases` +struct Spark_Connect_ListDatabases: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) The pattern that the database name needs to match + var pattern: String { + get {return _pattern ?? String()} + set {_pattern = newValue} + } + /// Returns true if `pattern` has been explicitly set. + var hasPattern: Bool {return self._pattern != nil} + /// Clears the value of `pattern`. Subsequent reads from it will return its default value. + mutating func clearPattern() {self._pattern = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _pattern: String? = nil +} + +/// See `spark.catalog.listTables` +struct Spark_Connect_ListTables: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) + var dbName: String { + get {return _dbName ?? String()} + set {_dbName = newValue} + } + /// Returns true if `dbName` has been explicitly set. + var hasDbName: Bool {return self._dbName != nil} + /// Clears the value of `dbName`. Subsequent reads from it will return its default value. + mutating func clearDbName() {self._dbName = nil} + + /// (Optional) The pattern that the table name needs to match + var pattern: String { + get {return _pattern ?? String()} + set {_pattern = newValue} + } + /// Returns true if `pattern` has been explicitly set. + var hasPattern: Bool {return self._pattern != nil} + /// Clears the value of `pattern`. Subsequent reads from it will return its default value. + mutating func clearPattern() {self._pattern = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dbName: String? = nil + fileprivate var _pattern: String? = nil +} + +/// See `spark.catalog.listFunctions` +struct Spark_Connect_ListFunctions: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) + var dbName: String { + get {return _dbName ?? String()} + set {_dbName = newValue} + } + /// Returns true if `dbName` has been explicitly set. + var hasDbName: Bool {return self._dbName != nil} + /// Clears the value of `dbName`. Subsequent reads from it will return its default value. + mutating func clearDbName() {self._dbName = nil} + + /// (Optional) The pattern that the function name needs to match + var pattern: String { + get {return _pattern ?? String()} + set {_pattern = newValue} + } + /// Returns true if `pattern` has been explicitly set. + var hasPattern: Bool {return self._pattern != nil} + /// Clears the value of `pattern`. Subsequent reads from it will return its default value. + mutating func clearPattern() {self._pattern = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dbName: String? = nil + fileprivate var _pattern: String? = nil +} + +/// See `spark.catalog.listColumns` +struct Spark_Connect_ListColumns: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + /// (Optional) + var dbName: String { + get {return _dbName ?? String()} + set {_dbName = newValue} + } + /// Returns true if `dbName` has been explicitly set. + var hasDbName: Bool {return self._dbName != nil} + /// Clears the value of `dbName`. Subsequent reads from it will return its default value. + mutating func clearDbName() {self._dbName = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dbName: String? = nil +} + +/// See `spark.catalog.getDatabase` +struct Spark_Connect_GetDatabase: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var dbName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.getTable` +struct Spark_Connect_GetTable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + /// (Optional) + var dbName: String { + get {return _dbName ?? String()} + set {_dbName = newValue} + } + /// Returns true if `dbName` has been explicitly set. + var hasDbName: Bool {return self._dbName != nil} + /// Clears the value of `dbName`. Subsequent reads from it will return its default value. + mutating func clearDbName() {self._dbName = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dbName: String? = nil +} + +/// See `spark.catalog.getFunction` +struct Spark_Connect_GetFunction: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var functionName: String = String() + + /// (Optional) + var dbName: String { + get {return _dbName ?? String()} + set {_dbName = newValue} + } + /// Returns true if `dbName` has been explicitly set. + var hasDbName: Bool {return self._dbName != nil} + /// Clears the value of `dbName`. Subsequent reads from it will return its default value. + mutating func clearDbName() {self._dbName = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dbName: String? = nil +} + +/// See `spark.catalog.databaseExists` +struct Spark_Connect_DatabaseExists: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var dbName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.tableExists` +struct Spark_Connect_TableExists: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + /// (Optional) + var dbName: String { + get {return _dbName ?? 
String()} + set {_dbName = newValue} + } + /// Returns true if `dbName` has been explicitly set. + var hasDbName: Bool {return self._dbName != nil} + /// Clears the value of `dbName`. Subsequent reads from it will return its default value. + mutating func clearDbName() {self._dbName = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dbName: String? = nil +} + +/// See `spark.catalog.functionExists` +struct Spark_Connect_FunctionExists: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var functionName: String = String() + + /// (Optional) + var dbName: String { + get {return _dbName ?? String()} + set {_dbName = newValue} + } + /// Returns true if `dbName` has been explicitly set. + var hasDbName: Bool {return self._dbName != nil} + /// Clears the value of `dbName`. Subsequent reads from it will return its default value. + mutating func clearDbName() {self._dbName = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dbName: String? = nil +} + +/// See `spark.catalog.createExternalTable` +struct Spark_Connect_CreateExternalTable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + /// (Optional) + var path: String { + get {return _path ?? String()} + set {_path = newValue} + } + /// Returns true if `path` has been explicitly set. + var hasPath: Bool {return self._path != nil} + /// Clears the value of `path`. Subsequent reads from it will return its default value. + mutating func clearPath() {self._path = nil} + + /// (Optional) + var source: String { + get {return _source ?? String()} + set {_source = newValue} + } + /// Returns true if `source` has been explicitly set. + var hasSource: Bool {return self._source != nil} + /// Clears the value of `source`. Subsequent reads from it will return its default value. + mutating func clearSource() {self._source = nil} + + /// (Optional) + var schema: Spark_Connect_DataType { + get {return _schema ?? Spark_Connect_DataType()} + set {_schema = newValue} + } + /// Returns true if `schema` has been explicitly set. + var hasSchema: Bool {return self._schema != nil} + /// Clears the value of `schema`. Subsequent reads from it will return its default value. + mutating func clearSchema() {self._schema = nil} + + /// Options could be empty for valid data source format. + /// The map key is case insensitive. + var options: Dictionary<String,String> = [:] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _path: String? = nil + fileprivate var _source: String? = nil + fileprivate var _schema: Spark_Connect_DataType? = nil +} + +/// See `spark.catalog.createTable` +struct Spark_Connect_CreateTable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + /// (Optional) + var path: String { + get {return _path ?? String()} + set {_path = newValue} + } + /// Returns true if `path` has been explicitly set.
+ var hasPath: Bool {return self._path != nil} + /// Clears the value of `path`. Subsequent reads from it will return its default value. + mutating func clearPath() {self._path = nil} + + /// (Optional) + var source: String { + get {return _source ?? String()} + set {_source = newValue} + } + /// Returns true if `source` has been explicitly set. + var hasSource: Bool {return self._source != nil} + /// Clears the value of `source`. Subsequent reads from it will return its default value. + mutating func clearSource() {self._source = nil} + + /// (Optional) + var description_p: String { + get {return _description_p ?? String()} + set {_description_p = newValue} + } + /// Returns true if `description_p` has been explicitly set. + var hasDescription_p: Bool {return self._description_p != nil} + /// Clears the value of `description_p`. Subsequent reads from it will return its default value. + mutating func clearDescription_p() {self._description_p = nil} + + /// (Optional) + var schema: Spark_Connect_DataType { + get {return _schema ?? Spark_Connect_DataType()} + set {_schema = newValue} + } + /// Returns true if `schema` has been explicitly set. + var hasSchema: Bool {return self._schema != nil} + /// Clears the value of `schema`. Subsequent reads from it will return its default value. + mutating func clearSchema() {self._schema = nil} + + /// Options could be empty for valid data source format. + /// The map key is case insensitive. + var options: Dictionary<String,String> = [:] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _path: String? = nil + fileprivate var _source: String? = nil + fileprivate var _description_p: String? = nil + fileprivate var _schema: Spark_Connect_DataType? = nil +} + +/// See `spark.catalog.dropTempView` +struct Spark_Connect_DropTempView: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var viewName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.dropGlobalTempView` +struct Spark_Connect_DropGlobalTempView: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var viewName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.recoverPartitions` +struct Spark_Connect_RecoverPartitions: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.isCached` +struct Spark_Connect_IsCached: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.cacheTable` +struct Spark_Connect_CacheTable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below.
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + /// (Optional) + var storageLevel: Spark_Connect_StorageLevel { + get {return _storageLevel ?? Spark_Connect_StorageLevel()} + set {_storageLevel = newValue} + } + /// Returns true if `storageLevel` has been explicitly set. + var hasStorageLevel: Bool {return self._storageLevel != nil} + /// Clears the value of `storageLevel`. Subsequent reads from it will return its default value. + mutating func clearStorageLevel() {self._storageLevel = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storageLevel: Spark_Connect_StorageLevel? = nil +} + +/// See `spark.catalog.uncacheTable` +struct Spark_Connect_UncacheTable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.clearCache` +struct Spark_Connect_ClearCache: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.refreshTable` +struct Spark_Connect_RefreshTable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var tableName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.refreshByPath` +struct Spark_Connect_RefreshByPath: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var path: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.currentCatalog` +struct Spark_Connect_CurrentCatalog: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.setCurrentCatalog` +struct Spark_Connect_SetCurrentCatalog: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) + var catalogName: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// See `spark.catalog.listCatalogs` +struct Spark_Connect_ListCatalogs: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) The pattern that the catalog name needs to match + var pattern: String { + get {return _pattern ?? 
String()} + set {_pattern = newValue} + } + /// Returns true if `pattern` has been explicitly set. + var hasPattern: Bool {return self._pattern != nil} + /// Clears the value of `pattern`. Subsequent reads from it will return its default value. + mutating func clearPattern() {self._pattern = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _pattern: String? = nil +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. + +fileprivate let _protobuf_package = "spark.connect" + +extension Spark_Connect_Catalog: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Catalog" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "current_database"), + 2: .standard(proto: "set_current_database"), + 3: .standard(proto: "list_databases"), + 4: .standard(proto: "list_tables"), + 5: .standard(proto: "list_functions"), + 6: .standard(proto: "list_columns"), + 7: .standard(proto: "get_database"), + 8: .standard(proto: "get_table"), + 9: .standard(proto: "get_function"), + 10: .standard(proto: "database_exists"), + 11: .standard(proto: "table_exists"), + 12: .standard(proto: "function_exists"), + 13: .standard(proto: "create_external_table"), + 14: .standard(proto: "create_table"), + 15: .standard(proto: "drop_temp_view"), + 16: .standard(proto: "drop_global_temp_view"), + 17: .standard(proto: "recover_partitions"), + 18: .standard(proto: "is_cached"), + 19: .standard(proto: "cache_table"), + 20: .standard(proto: "uncache_table"), + 21: .standard(proto: "clear_cache"), + 22: .standard(proto: "refresh_table"), + 23: .standard(proto: "refresh_by_path"), + 24: .standard(proto: "current_catalog"), + 25: .standard(proto: "set_current_catalog"), + 26: .standard(proto: "list_catalogs"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_CurrentDatabase? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .currentDatabase(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .currentDatabase(v) + } + }() + case 2: try { + var v: Spark_Connect_SetCurrentDatabase? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .setCurrentDatabase(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .setCurrentDatabase(v) + } + }() + case 3: try { + var v: Spark_Connect_ListDatabases? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .listDatabases(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .listDatabases(v) + } + }() + case 4: try { + var v: Spark_Connect_ListTables?
+ var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .listTables(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .listTables(v) + } + }() + case 5: try { + var v: Spark_Connect_ListFunctions? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .listFunctions(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .listFunctions(v) + } + }() + case 6: try { + var v: Spark_Connect_ListColumns? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .listColumns(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .listColumns(v) + } + }() + case 7: try { + var v: Spark_Connect_GetDatabase? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .getDatabase(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .getDatabase(v) + } + }() + case 8: try { + var v: Spark_Connect_GetTable? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .getTable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .getTable(v) + } + }() + case 9: try { + var v: Spark_Connect_GetFunction? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .getFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .getFunction(v) + } + }() + case 10: try { + var v: Spark_Connect_DatabaseExists? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .databaseExists(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .databaseExists(v) + } + }() + case 11: try { + var v: Spark_Connect_TableExists? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .tableExists(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .tableExists(v) + } + }() + case 12: try { + var v: Spark_Connect_FunctionExists? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .functionExists(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .functionExists(v) + } + }() + case 13: try { + var v: Spark_Connect_CreateExternalTable? 
+ var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .createExternalTable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .createExternalTable(v) + } + }() + case 14: try { + var v: Spark_Connect_CreateTable? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .createTable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .createTable(v) + } + }() + case 15: try { + var v: Spark_Connect_DropTempView? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .dropTempView(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .dropTempView(v) + } + }() + case 16: try { + var v: Spark_Connect_DropGlobalTempView? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .dropGlobalTempView(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .dropGlobalTempView(v) + } + }() + case 17: try { + var v: Spark_Connect_RecoverPartitions? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .recoverPartitions(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .recoverPartitions(v) + } + }() + case 18: try { + var v: Spark_Connect_IsCached? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .isCached(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .isCached(v) + } + }() + case 19: try { + var v: Spark_Connect_CacheTable? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .cacheTable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .cacheTable(v) + } + }() + case 20: try { + var v: Spark_Connect_UncacheTable? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .uncacheTable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .uncacheTable(v) + } + }() + case 21: try { + var v: Spark_Connect_ClearCache? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .clearCache_p(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .clearCache_p(v) + } + }() + case 22: try { + var v: Spark_Connect_RefreshTable? 
+ var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .refreshTable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .refreshTable(v) + } + }() + case 23: try { + var v: Spark_Connect_RefreshByPath? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .refreshByPath(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .refreshByPath(v) + } + }() + case 24: try { + var v: Spark_Connect_CurrentCatalog? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .currentCatalog(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .currentCatalog(v) + } + }() + case 25: try { + var v: Spark_Connect_SetCurrentCatalog? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .setCurrentCatalog(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .setCurrentCatalog(v) + } + }() + case 26: try { + var v: Spark_Connect_ListCatalogs? + var hadOneofValue = false + if let current = self.catType { + hadOneofValue = true + if case .listCatalogs(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.catType = .listCatalogs(v) + } + }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.catType { + case .currentDatabase?: try { + guard case .currentDatabase(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .setCurrentDatabase?: try { + guard case .setCurrentDatabase(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .listDatabases?: try { + guard case .listDatabases(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .listTables?: try { + guard case .listTables(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .listFunctions?: try { + guard case .listFunctions(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .listColumns?: try { + guard case .listColumns(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .getDatabase?: try { + guard case .getDatabase(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case .getTable?: try { + guard case .getTable(let v)?
= self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + }() + case .getFunction?: try { + guard case .getFunction(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case .databaseExists?: try { + guard case .databaseExists(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 10) + }() + case .tableExists?: try { + guard case .tableExists(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 11) + }() + case .functionExists?: try { + guard case .functionExists(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 12) + }() + case .createExternalTable?: try { + guard case .createExternalTable(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 13) + }() + case .createTable?: try { + guard case .createTable(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + }() + case .dropTempView?: try { + guard case .dropTempView(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 15) + }() + case .dropGlobalTempView?: try { + guard case .dropGlobalTempView(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 16) + }() + case .recoverPartitions?: try { + guard case .recoverPartitions(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 17) + }() + case .isCached?: try { + guard case .isCached(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 18) + }() + case .cacheTable?: try { + guard case .cacheTable(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 19) + }() + case .uncacheTable?: try { + guard case .uncacheTable(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 20) + }() + case .clearCache_p?: try { + guard case .clearCache_p(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 21) + }() + case .refreshTable?: try { + guard case .refreshTable(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 22) + }() + case .refreshByPath?: try { + guard case .refreshByPath(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 23) + }() + case .currentCatalog?: try { + guard case .currentCatalog(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 24) + }() + case .setCurrentCatalog?: try { + guard case .setCurrentCatalog(let v)? = self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 25) + }() + case .listCatalogs?: try { + guard case .listCatalogs(let v)? 
= self.catType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 26) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Catalog, rhs: Spark_Connect_Catalog) -> Bool { + if lhs.catType != rhs.catType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CurrentDatabase: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CurrentDatabase" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CurrentDatabase, rhs: Spark_Connect_CurrentDatabase) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_SetCurrentDatabase: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".SetCurrentDatabase" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + if !self.dbName.isEmpty { + try visitor.visitSingularStringField(value: self.dbName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_SetCurrentDatabase, rhs: Spark_Connect_SetCurrentDatabase) -> Bool { + if lhs.dbName != rhs.dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ListDatabases: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ListDatabases" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "pattern"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._pattern) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled.
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._pattern { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ListDatabases, rhs: Spark_Connect_ListDatabases) -> Bool { + if lhs._pattern != rhs._pattern {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ListTables: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ListTables" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "db_name"), + 2: .same(proto: "pattern"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._dbName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._pattern) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._dbName { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._pattern { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ListTables, rhs: Spark_Connect_ListTables) -> Bool { + if lhs._dbName != rhs._dbName {return false} + if lhs._pattern != rhs._pattern {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ListFunctions: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ListFunctions" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "db_name"), + 2: .same(proto: "pattern"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._dbName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._pattern) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled.
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._dbName { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._pattern { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ListFunctions, rhs: Spark_Connect_ListFunctions) -> Bool { + if lhs._dbName != rhs._dbName {return false} + if lhs._pattern != rhs._pattern {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ListColumns: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ListColumns" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + 2: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try { if let v = self._dbName { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ListColumns, rhs: Spark_Connect_ListColumns) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs._dbName != rhs._dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_GetDatabase: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".GetDatabase" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled.
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + if !self.dbName.isEmpty { + try visitor.visitSingularStringField(value: self.dbName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_GetDatabase, rhs: Spark_Connect_GetDatabase) -> Bool { + if lhs.dbName != rhs.dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_GetTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".GetTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + 2: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try { if let v = self._dbName { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_GetTable, rhs: Spark_Connect_GetTable) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs._dbName != rhs._dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_GetFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".GetFunction" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "function_name"), + 2: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.functionName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled.
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.functionName.isEmpty { + try visitor.visitSingularStringField(value: self.functionName, fieldNumber: 1) + } + try { if let v = self._dbName { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_GetFunction, rhs: Spark_Connect_GetFunction) -> Bool { + if lhs.functionName != rhs.functionName {return false} + if lhs._dbName != rhs._dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DatabaseExists: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".DatabaseExists" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + if !self.dbName.isEmpty { + try visitor.visitSingularStringField(value: self.dbName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DatabaseExists, rhs: Spark_Connect_DatabaseExists) -> Bool { + if lhs.dbName != rhs.dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_TableExists: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".TableExists" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + 2: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled.
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try { if let v = self._dbName { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_TableExists, rhs: Spark_Connect_TableExists) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs._dbName != rhs._dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_FunctionExists: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".FunctionExists" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "function_name"), + 2: .standard(proto: "db_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.functionName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._dbName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.functionName.isEmpty { + try visitor.visitSingularStringField(value: self.functionName, fieldNumber: 1) + } + try { if let v = self._dbName { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_FunctionExists, rhs: Spark_Connect_FunctionExists) -> Bool { + if lhs.functionName != rhs.functionName {return false} + if lhs._dbName != rhs._dbName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CreateExternalTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CreateExternalTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + 2: .same(proto: "path"), + 3: .same(proto: "source"), + 4: .same(proto: "schema"), + 5: .same(proto: "options"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled.
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._path) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._source) }() + case 4: try { try decoder.decodeSingularMessageField(value: &self._schema) }() + case 5: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: &self.options) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try { if let v = self._path { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try { if let v = self._source { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + try { if let v = self._schema { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + if !self.options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: self.options, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CreateExternalTable, rhs: Spark_Connect_CreateExternalTable) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs._path != rhs._path {return false} + if lhs._source != rhs._source {return false} + if lhs._schema != rhs._schema {return false} + if lhs.options != rhs.options {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CreateTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CreateTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + 2: .same(proto: "path"), + 3: .same(proto: "source"), + 4: .same(proto: "description"), + 5: .same(proto: "schema"), + 6: .same(proto: "options"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._path) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._source) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._description_p) }() + case 5: try { try decoder.decodeSingularMessageField(value: &self._schema) }() + case 6: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: &self.options) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled.
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try { if let v = self._path { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try { if let v = self._source { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + try { if let v = self._description_p { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try { if let v = self._schema { + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + } }() + if !self.options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: self.options, fieldNumber: 6) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CreateTable, rhs: Spark_Connect_CreateTable) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs._path != rhs._path {return false} + if lhs._source != rhs._source {return false} + if lhs._description_p != rhs._description_p {return false} + if lhs._schema != rhs._schema {return false} + if lhs.options != rhs.options {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DropTempView: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".DropTempView" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "view_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.viewName) }() + default: break + } + } + } + + func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws { + if !self.viewName.isEmpty { + try visitor.visitSingularStringField(value: self.viewName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DropTempView, rhs: Spark_Connect_DropTempView) -> Bool { + if lhs.viewName != rhs.viewName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DropGlobalTempView: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".DropGlobalTempView" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "view_name"), + ] + + mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled.
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.viewName) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.viewName.isEmpty { + try visitor.visitSingularStringField(value: self.viewName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DropGlobalTempView, rhs: Spark_Connect_DropGlobalTempView) -> Bool { + if lhs.viewName != rhs.viewName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_RecoverPartitions: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".RecoverPartitions" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_RecoverPartitions, rhs: Spark_Connect_RecoverPartitions) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_IsCached: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".IsCached" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_IsCached, rhs: Spark_Connect_IsCached) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CacheTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CacheTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + 2: .standard(proto: "storage_level"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._storageLevel) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try { if let v = self._storageLevel { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CacheTable, rhs: Spark_Connect_CacheTable) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs._storageLevel != rhs._storageLevel {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_UncacheTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".UncacheTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_UncacheTable, rhs: Spark_Connect_UncacheTable) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ClearCache: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ClearCache" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ClearCache, rhs: Spark_Connect_ClearCache) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_RefreshTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".RefreshTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_RefreshTable, rhs: Spark_Connect_RefreshTable) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_RefreshByPath: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".RefreshByPath" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "path"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.path) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.path.isEmpty { + try visitor.visitSingularStringField(value: self.path, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_RefreshByPath, rhs: Spark_Connect_RefreshByPath) -> Bool { + if lhs.path != rhs.path {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CurrentCatalog: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CurrentCatalog" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CurrentCatalog, rhs: Spark_Connect_CurrentCatalog) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_SetCurrentCatalog: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".SetCurrentCatalog" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "catalog_name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.catalogName) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.catalogName.isEmpty { + try visitor.visitSingularStringField(value: self.catalogName, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_SetCurrentCatalog, rhs: Spark_Connect_SetCurrentCatalog) -> Bool { + if lhs.catalogName != rhs.catalogName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ListCatalogs: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ListCatalogs" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "pattern"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._pattern) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._pattern { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ListCatalogs, rhs: Spark_Connect_ListCatalogs) -> Bool { + if lhs._pattern != rhs._pattern {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Sources/SparkConnect/commands.grpc.swift b/Sources/SparkConnect/commands.grpc.swift new file mode 100644 index 0000000..87fa382 --- /dev/null +++ b/Sources/SparkConnect/commands.grpc.swift @@ -0,0 +1,26 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/commands.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/commands.pb.swift b/Sources/SparkConnect/commands.pb.swift new file mode 100644 index 0000000..276dc01 --- /dev/null +++ b/Sources/SparkConnect/commands.pb.swift @@ -0,0 +1,4588 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/commands.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+import Foundation
+import SwiftProtobuf
+
+// If the compiler emits an error on this type, it is because this file
+// was generated by a version of the `protoc` Swift plug-in that is
+// incompatible with the version of SwiftProtobuf to which you are linking.
+// Please ensure that you are building against the same version of the API
+// that was used to generate this file.
+fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
+  struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
+  typealias Version = _2
+}
+
+/// The enum used for client-side streaming query listener events.
+/// There is no QueryStartedEvent defined here;
+/// it is added as a field in WriteStreamOperationStartResult.
+enum Spark_Connect_StreamingQueryEventType: SwiftProtobuf.Enum, Swift.CaseIterable {
+  typealias RawValue = Int
+  case queryProgressUnspecified // = 0
+  case queryProgressEvent // = 1
+  case queryTerminatedEvent // = 2
+  case queryIdleEvent // = 3
+  case UNRECOGNIZED(Int)
+
+  init() {
+    self = .queryProgressUnspecified
+  }
+
+  init?(rawValue: Int) {
+    switch rawValue {
+    case 0: self = .queryProgressUnspecified
+    case 1: self = .queryProgressEvent
+    case 2: self = .queryTerminatedEvent
+    case 3: self = .queryIdleEvent
+    default: self = .UNRECOGNIZED(rawValue)
+    }
+  }
+
+  var rawValue: Int {
+    switch self {
+    case .queryProgressUnspecified: return 0
+    case .queryProgressEvent: return 1
+    case .queryTerminatedEvent: return 2
+    case .queryIdleEvent: return 3
+    case .UNRECOGNIZED(let i): return i
+    }
+  }
+
+  // The compiler won't synthesize support with the UNRECOGNIZED case.
+  static let allCases: [Spark_Connect_StreamingQueryEventType] = [
+    .queryProgressUnspecified,
+    .queryProgressEvent,
+    .queryTerminatedEvent,
+    .queryIdleEvent,
+  ]
+
+}
+
+/// A [[Command]] is an operation that is executed by the server that does not directly consume or
+/// produce a relational result.
+struct Spark_Connect_Command: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  var commandType: Spark_Connect_Command.OneOf_CommandType? = nil
+
+  var registerFunction: Spark_Connect_CommonInlineUserDefinedFunction {
+    get {
+      if case .registerFunction(let v)? = commandType {return v}
+      return Spark_Connect_CommonInlineUserDefinedFunction()
+    }
+    set {commandType = .registerFunction(newValue)}
+  }
+
+  var writeOperation: Spark_Connect_WriteOperation {
+    get {
+      if case .writeOperation(let v)? = commandType {return v}
+      return Spark_Connect_WriteOperation()
+    }
+    set {commandType = .writeOperation(newValue)}
+  }
+
+  var createDataframeView: Spark_Connect_CreateDataFrameViewCommand {
+    get {
+      if case .createDataframeView(let v)? = commandType {return v}
+      return Spark_Connect_CreateDataFrameViewCommand()
+    }
+    set {commandType = .createDataframeView(newValue)}
+  }
+
+  var writeOperationV2: Spark_Connect_WriteOperationV2 {
+    get {
+      if case .writeOperationV2(let v)? = commandType {return v}
+      return Spark_Connect_WriteOperationV2()
+    }
+    set {commandType = .writeOperationV2(newValue)}
+  }
+
+  var sqlCommand: Spark_Connect_SqlCommand {
+    get {
+      if case .sqlCommand(let v)? = commandType {return v}
+      return Spark_Connect_SqlCommand()
+    }
+    set {commandType = .sqlCommand(newValue)}
+  }
+
+  var writeStreamOperationStart: Spark_Connect_WriteStreamOperationStart {
+    get {
+      if case .writeStreamOperationStart(let v)? = commandType {return v}
+      return Spark_Connect_WriteStreamOperationStart()
+    }
+    set {commandType = .writeStreamOperationStart(newValue)}
+  }
+
+  var streamingQueryCommand: Spark_Connect_StreamingQueryCommand {
+    get {
+      if case .streamingQueryCommand(let v)? = commandType {return v}
+      return Spark_Connect_StreamingQueryCommand()
+    }
+    set {commandType = .streamingQueryCommand(newValue)}
+  }
+
+  var getResourcesCommand: Spark_Connect_GetResourcesCommand {
+    get {
+      if case .getResourcesCommand(let v)? = commandType {return v}
+      return Spark_Connect_GetResourcesCommand()
+    }
+    set {commandType = .getResourcesCommand(newValue)}
+  }
+
+  var streamingQueryManagerCommand: Spark_Connect_StreamingQueryManagerCommand {
+    get {
+      if case .streamingQueryManagerCommand(let v)? = commandType {return v}
+      return Spark_Connect_StreamingQueryManagerCommand()
+    }
+    set {commandType = .streamingQueryManagerCommand(newValue)}
+  }
+
+  var registerTableFunction: Spark_Connect_CommonInlineUserDefinedTableFunction {
+    get {
+      if case .registerTableFunction(let v)? = commandType {return v}
+      return Spark_Connect_CommonInlineUserDefinedTableFunction()
+    }
+    set {commandType = .registerTableFunction(newValue)}
+  }
+
+  var streamingQueryListenerBusCommand: Spark_Connect_StreamingQueryListenerBusCommand {
+    get {
+      if case .streamingQueryListenerBusCommand(let v)? = commandType {return v}
+      return Spark_Connect_StreamingQueryListenerBusCommand()
+    }
+    set {commandType = .streamingQueryListenerBusCommand(newValue)}
+  }
+
+  var registerDataSource: Spark_Connect_CommonInlineUserDefinedDataSource {
+    get {
+      if case .registerDataSource(let v)? = commandType {return v}
+      return Spark_Connect_CommonInlineUserDefinedDataSource()
+    }
+    set {commandType = .registerDataSource(newValue)}
+  }
+
+  var createResourceProfileCommand: Spark_Connect_CreateResourceProfileCommand {
+    get {
+      if case .createResourceProfileCommand(let v)? = commandType {return v}
+      return Spark_Connect_CreateResourceProfileCommand()
+    }
+    set {commandType = .createResourceProfileCommand(newValue)}
+  }
+
+  var checkpointCommand: Spark_Connect_CheckpointCommand {
+    get {
+      if case .checkpointCommand(let v)? = commandType {return v}
+      return Spark_Connect_CheckpointCommand()
+    }
+    set {commandType = .checkpointCommand(newValue)}
+  }
+
+  var removeCachedRemoteRelationCommand: Spark_Connect_RemoveCachedRemoteRelationCommand {
+    get {
+      if case .removeCachedRemoteRelationCommand(let v)? = commandType {return v}
+      return Spark_Connect_RemoveCachedRemoteRelationCommand()
+    }
+    set {commandType = .removeCachedRemoteRelationCommand(newValue)}
+  }
+
+  var mergeIntoTableCommand: Spark_Connect_MergeIntoTableCommand {
+    get {
+      if case .mergeIntoTableCommand(let v)? = commandType {return v}
+      return Spark_Connect_MergeIntoTableCommand()
+    }
+    set {commandType = .mergeIntoTableCommand(newValue)}
+  }
+
+  var mlCommand: Spark_Connect_MlCommand {
+    get {
+      if case .mlCommand(let v)? = commandType {return v}
+      return Spark_Connect_MlCommand()
+    }
+    set {commandType = .mlCommand(newValue)}
+  }
+
+  var executeExternalCommand: Spark_Connect_ExecuteExternalCommand {
+    get {
+      if case .executeExternalCommand(let v)? = commandType {return v}
+      return Spark_Connect_ExecuteExternalCommand()
+    }
+    set {commandType = .executeExternalCommand(newValue)}
+  }
+
+  /// This field is used to mark extensions to the protocol. When plugins generate arbitrary
+  /// Commands they can add them here. During the planning the correct resolution is done.
+  var `extension`: SwiftProtobuf.Google_Protobuf_Any {
+    get {
+      if case .extension(let v)? = commandType {return v}
+      return SwiftProtobuf.Google_Protobuf_Any()
+    }
+    set {commandType = .extension(newValue)}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OneOf_CommandType: Equatable, Sendable {
+    case registerFunction(Spark_Connect_CommonInlineUserDefinedFunction)
+    case writeOperation(Spark_Connect_WriteOperation)
+    case createDataframeView(Spark_Connect_CreateDataFrameViewCommand)
+    case writeOperationV2(Spark_Connect_WriteOperationV2)
+    case sqlCommand(Spark_Connect_SqlCommand)
+    case writeStreamOperationStart(Spark_Connect_WriteStreamOperationStart)
+    case streamingQueryCommand(Spark_Connect_StreamingQueryCommand)
+    case getResourcesCommand(Spark_Connect_GetResourcesCommand)
+    case streamingQueryManagerCommand(Spark_Connect_StreamingQueryManagerCommand)
+    case registerTableFunction(Spark_Connect_CommonInlineUserDefinedTableFunction)
+    case streamingQueryListenerBusCommand(Spark_Connect_StreamingQueryListenerBusCommand)
+    case registerDataSource(Spark_Connect_CommonInlineUserDefinedDataSource)
+    case createResourceProfileCommand(Spark_Connect_CreateResourceProfileCommand)
+    case checkpointCommand(Spark_Connect_CheckpointCommand)
+    case removeCachedRemoteRelationCommand(Spark_Connect_RemoveCachedRemoteRelationCommand)
+    case mergeIntoTableCommand(Spark_Connect_MergeIntoTableCommand)
+    case mlCommand(Spark_Connect_MlCommand)
+    case executeExternalCommand(Spark_Connect_ExecuteExternalCommand)
+    /// This field is used to mark extensions to the protocol. When plugins generate arbitrary
+    /// Commands they can add them here. During the planning the correct resolution is done.
+    case `extension`(SwiftProtobuf.Google_Protobuf_Any)
+
+  }
+
+  init() {}
+}
+
+/// A SQL Command is used to trigger the eager evaluation of SQL commands in Spark.
+///
+/// When the SQL provided as part of the message is a command, it will be immediately evaluated
+/// and the result will be collected and returned as part of a LocalRelation. If the result is
+/// not a command, the operation will simply return a SQL Relation. This allows the client to be
+/// almost oblivious to the server-side behavior.
+struct Spark_Connect_SqlCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) SQL Query.
+  ///
+  /// NOTE: This field was marked as deprecated in the .proto file.
+  var sql: String = String()
+
+  /// (Optional) A map of parameter names to literal expressions.
+  ///
+  /// NOTE: This field was marked as deprecated in the .proto file.
+  var args: Dictionary<String,Spark_Connect_Expression.Literal> = [:]
+
+  /// (Optional) A sequence of literal expressions for positional parameters in the SQL query text.
+  ///
+  /// NOTE: This field was marked as deprecated in the .proto file.
+  var posArgs: [Spark_Connect_Expression.Literal] = []
+
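+  // Illustrative sketch (editorial, not part of the generated API surface):
+  // wrapping a SQL command in a `Command` via the oneof accessors above. The
+  // SQL text and `literalTen` value are hypothetical.
+  //
+  //   var command = Spark_Connect_Command()
+  //   command.sqlCommand.sql = "SELECT * FROM range(?)"
+  //   command.sqlCommand.posArgs = [literalTen]  // an assumed Spark_Connect_Expression.Literal
+  //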
+  /// (Optional) A map of parameter names to expressions.
+  /// It cannot coexist with `pos_arguments`.
+  ///
+  /// NOTE: This field was marked as deprecated in the .proto file.
+  var namedArguments: Dictionary<String,Spark_Connect_Expression> = [:]
+
+  /// (Optional) A sequence of expressions for positional parameters in the SQL query text.
+  /// It cannot coexist with `named_arguments`.
+  ///
+  /// NOTE: This field was marked as deprecated in the .proto file.
+  var posArguments: [Spark_Connect_Expression] = []
+
+  /// (Optional) The relation that this SQL command will be built on.
+  var input: Spark_Connect_Relation {
+    get {return _input ?? Spark_Connect_Relation()}
+    set {_input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return self._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {self._input = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _input: Spark_Connect_Relation? = nil
+}
+
+/// A command that can create DataFrame global temp view or local temp view.
+struct Spark_Connect_CreateDataFrameViewCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The relation that this view will be built on.
+  var input: Spark_Connect_Relation {
+    get {return _input ?? Spark_Connect_Relation()}
+    set {_input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return self._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {self._input = nil}
+
+  /// (Required) View name.
+  var name: String = String()
+
+  /// (Required) Whether this is global temp view or local temp view.
+  var isGlobal: Bool = false
+
+  /// (Required)
+  ///
+  /// If true, and if the view already exists, updates it; if false, and if the view
+  /// already exists, throws exception.
+  var replace: Bool = false
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _input: Spark_Connect_Relation? = nil
+}
+
+/// As writes are not directly handled during analysis and planning, they are modeled as commands.
+struct Spark_Connect_WriteOperation: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The output of the `input` relation will be persisted according to the options.
+  var input: Spark_Connect_Relation {
+    get {return _input ?? Spark_Connect_Relation()}
+    set {_input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return self._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {self._input = nil}
+
+  /// (Optional) Format value according to the Spark documentation. Examples are: text, parquet, delta.
+  var source: String {
+    get {return _source ?? String()}
+    set {_source = newValue}
+  }
+  /// Returns true if `source` has been explicitly set.
+  var hasSource: Bool {return self._source != nil}
+  /// Clears the value of `source`. Subsequent reads from it will return its default value.
+  mutating func clearSource() {self._source = nil}
+
+  /// (Optional)
+  ///
+  /// The destination of the write operation can be either a path or a table.
+  /// If the destination is neither a path nor a table, such as jdbc and noop,
+  /// the `save_type` should not be set.
+  var saveType: Spark_Connect_WriteOperation.OneOf_SaveType? = nil
+
+  var path: String {
+    get {
+      if case .path(let v)? = saveType {return v}
+      return String()
+    }
+    set {saveType = .path(newValue)}
+  }
+
+  var table: Spark_Connect_WriteOperation.SaveTable {
+    get {
+      if case .table(let v)? = saveType {return v}
+      return Spark_Connect_WriteOperation.SaveTable()
+    }
+    set {saveType = .table(newValue)}
+  }
+
+  /// (Required) the save mode.
+  var mode: Spark_Connect_WriteOperation.SaveMode = .unspecified
+
+  /// (Optional) List of columns to sort the output by.
+  var sortColumnNames: [String] = []
+
+  /// (Optional) List of columns for partitioning.
+  var partitioningColumns: [String] = []
+
+  /// (Optional) Bucketing specification. Bucketing must set the number of buckets and the columns
+  /// to bucket by.
+  var bucketBy: Spark_Connect_WriteOperation.BucketBy {
+    get {return _bucketBy ?? Spark_Connect_WriteOperation.BucketBy()}
+    set {_bucketBy = newValue}
+  }
+  /// Returns true if `bucketBy` has been explicitly set.
+  var hasBucketBy: Bool {return self._bucketBy != nil}
+  /// Clears the value of `bucketBy`. Subsequent reads from it will return its default value.
+  mutating func clearBucketBy() {self._bucketBy = nil}
+
+  /// (Optional) A list of configuration options.
+  var options: Dictionary<String,String> = [:]
+
+  /// (Optional) Columns used for clustering the table.
+  var clusteringColumns: [String] = []
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  /// (Optional)
+  ///
+  /// The destination of the write operation can be either a path or a table.
+  /// If the destination is neither a path nor a table, such as jdbc and noop,
+  /// the `save_type` should not be set.
+  enum OneOf_SaveType: Equatable, Sendable {
+    case path(String)
+    case table(Spark_Connect_WriteOperation.SaveTable)
+
+  }
+
+  enum SaveMode: SwiftProtobuf.Enum, Swift.CaseIterable {
+    typealias RawValue = Int
+    case unspecified // = 0
+    case append // = 1
+    case overwrite // = 2
+    case errorIfExists // = 3
+    case ignore // = 4
+    case UNRECOGNIZED(Int)
+
+    init() {
+      self = .unspecified
+    }
+
+    init?(rawValue: Int) {
+      switch rawValue {
+      case 0: self = .unspecified
+      case 1: self = .append
+      case 2: self = .overwrite
+      case 3: self = .errorIfExists
+      case 4: self = .ignore
+      default: self = .UNRECOGNIZED(rawValue)
+      }
+    }
+
+    var rawValue: Int {
+      switch self {
+      case .unspecified: return 0
+      case .append: return 1
+      case .overwrite: return 2
+      case .errorIfExists: return 3
+      case .ignore: return 4
+      case .UNRECOGNIZED(let i): return i
+      }
+    }
+
+    // The compiler won't synthesize support with the UNRECOGNIZED case.
+    static let allCases: [Spark_Connect_WriteOperation.SaveMode] = [
+      .unspecified,
+      .append,
+      .overwrite,
+      .errorIfExists,
+      .ignore,
+    ]
+
+  }
+
+  struct SaveTable: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) The table name.
+    var tableName: String = String()
+
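+    // Illustrative sketch (editorial, not generated code): routing a write to
+    // a table destination; the table name below is hypothetical.
+    //
+    //   var write = Spark_Connect_WriteOperation()
+    //   write.mode = .overwrite
+    //   var table = Spark_Connect_WriteOperation.SaveTable()
+    //   table.tableName = "db.events"
+    //   write.table = table  // sets the save_type oneof to .table
+    //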
+    /// (Required) The method to be called to write to the table.
+    var saveMethod: Spark_Connect_WriteOperation.SaveTable.TableSaveMethod = .unspecified
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    enum TableSaveMethod: SwiftProtobuf.Enum, Swift.CaseIterable {
+      typealias RawValue = Int
+      case unspecified // = 0
+      case saveAsTable // = 1
+      case insertInto // = 2
+      case UNRECOGNIZED(Int)
+
+      init() {
+        self = .unspecified
+      }
+
+      init?(rawValue: Int) {
+        switch rawValue {
+        case 0: self = .unspecified
+        case 1: self = .saveAsTable
+        case 2: self = .insertInto
+        default: self = .UNRECOGNIZED(rawValue)
+        }
+      }
+
+      var rawValue: Int {
+        switch self {
+        case .unspecified: return 0
+        case .saveAsTable: return 1
+        case .insertInto: return 2
+        case .UNRECOGNIZED(let i): return i
+        }
+      }
+
+      // The compiler won't synthesize support with the UNRECOGNIZED case.
+      static let allCases: [Spark_Connect_WriteOperation.SaveTable.TableSaveMethod] = [
+        .unspecified,
+        .saveAsTable,
+        .insertInto,
+      ]
+
+    }
+
+    init() {}
+  }
+
+  struct BucketBy: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    var bucketColumnNames: [String] = []
+
+    var numBuckets: Int32 = 0
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  init() {}
+
+  fileprivate var _input: Spark_Connect_Relation? = nil
+  fileprivate var _source: String? = nil
+  fileprivate var _bucketBy: Spark_Connect_WriteOperation.BucketBy? = nil
+}
+
+/// As writes are not directly handled during analysis and planning, they are modeled as commands.
+struct Spark_Connect_WriteOperationV2: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The output of the `input` relation will be persisted according to the options.
+  var input: Spark_Connect_Relation {
+    get {return _input ?? Spark_Connect_Relation()}
+    set {_input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return self._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {self._input = nil}
+
+  /// (Required) The destination of the write operation must be either a path or a table.
+  var tableName: String = String()
+
+  /// (Optional) A provider for the underlying output data source. Spark's default catalog supports
+  /// "parquet", "json", etc.
+  var provider: String {
+    get {return _provider ?? String()}
+    set {_provider = newValue}
+  }
+  /// Returns true if `provider` has been explicitly set.
+  var hasProvider: Bool {return self._provider != nil}
+  /// Clears the value of `provider`. Subsequent reads from it will return its default value.
+  mutating func clearProvider() {self._provider = nil}
+
+  /// (Optional) List of columns for partitioning for output table created by `create`,
+  /// `createOrReplace`, or `replace`
+  var partitioningColumns: [Spark_Connect_Expression] = []
+
+  /// (Optional) A list of configuration options.
+  var options: Dictionary<String,String> = [:]
+
+  /// (Optional) A list of table properties.
+  var tableProperties: Dictionary<String,String> = [:]
+
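+  // Illustrative sketch (editorial, not generated code): a v2 write that
+  // creates or replaces a table; the names below are hypothetical.
+  //
+  //   var writeV2 = Spark_Connect_WriteOperationV2()
+  //   writeV2.tableName = "db.events"
+  //   writeV2.provider = "parquet"
+  //   writeV2.mode = .createOrReplace
+  //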
+  /// (Required) Write mode.
+  var mode: Spark_Connect_WriteOperationV2.Mode = .unspecified
+
+  /// (Optional) A condition for overwrite saving mode
+  var overwriteCondition: Spark_Connect_Expression {
+    get {return _overwriteCondition ?? Spark_Connect_Expression()}
+    set {_overwriteCondition = newValue}
+  }
+  /// Returns true if `overwriteCondition` has been explicitly set.
+  var hasOverwriteCondition: Bool {return self._overwriteCondition != nil}
+  /// Clears the value of `overwriteCondition`. Subsequent reads from it will return its default value.
+  mutating func clearOverwriteCondition() {self._overwriteCondition = nil}
+
+  /// (Optional) Columns used for clustering the table.
+  var clusteringColumns: [String] = []
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum Mode: SwiftProtobuf.Enum, Swift.CaseIterable {
+    typealias RawValue = Int
+    case unspecified // = 0
+    case create // = 1
+    case overwrite // = 2
+    case overwritePartitions // = 3
+    case append // = 4
+    case replace // = 5
+    case createOrReplace // = 6
+    case UNRECOGNIZED(Int)
+
+    init() {
+      self = .unspecified
+    }
+
+    init?(rawValue: Int) {
+      switch rawValue {
+      case 0: self = .unspecified
+      case 1: self = .create
+      case 2: self = .overwrite
+      case 3: self = .overwritePartitions
+      case 4: self = .append
+      case 5: self = .replace
+      case 6: self = .createOrReplace
+      default: self = .UNRECOGNIZED(rawValue)
+      }
+    }
+
+    var rawValue: Int {
+      switch self {
+      case .unspecified: return 0
+      case .create: return 1
+      case .overwrite: return 2
+      case .overwritePartitions: return 3
+      case .append: return 4
+      case .replace: return 5
+      case .createOrReplace: return 6
+      case .UNRECOGNIZED(let i): return i
+      }
+    }
+
+    // The compiler won't synthesize support with the UNRECOGNIZED case.
+    static let allCases: [Spark_Connect_WriteOperationV2.Mode] = [
+      .unspecified,
+      .create,
+      .overwrite,
+      .overwritePartitions,
+      .append,
+      .replace,
+      .createOrReplace,
+    ]
+
+  }
+
+  init() {}
+
+  fileprivate var _input: Spark_Connect_Relation? = nil
+  fileprivate var _provider: String? = nil
+  fileprivate var _overwriteCondition: Spark_Connect_Expression? = nil
+}
+
+/// Starts write stream operation as streaming query. Query ID and Run ID of the streaming
+/// query are returned.
+struct Spark_Connect_WriteStreamOperationStart: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The output of the `input` streaming relation will be written.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  var format: String {
+    get {return _storage._format}
+    set {_uniqueStorage()._format = newValue}
+  }
+
+  var options: Dictionary<String,String> {
+    get {return _storage._options}
+    set {_uniqueStorage()._options = newValue}
+  }
+
+  var partitioningColumnNames: [String] {
+    get {return _storage._partitioningColumnNames}
+    set {_uniqueStorage()._partitioningColumnNames = newValue}
+  }
+
+  var trigger: OneOf_Trigger? {
+    get {return _storage._trigger}
+    set {_uniqueStorage()._trigger = newValue}
+  }
+
+  var processingTimeInterval: String {
+    get {
+      if case .processingTimeInterval(let v)? = _storage._trigger {return v}
+      return String()
+    }
+    set {_uniqueStorage()._trigger = .processingTimeInterval(newValue)}
+  }
+
+  var availableNow: Bool {
+    get {
+      if case .availableNow(let v)? = _storage._trigger {return v}
+      return false
+    }
+    set {_uniqueStorage()._trigger = .availableNow(newValue)}
+  }
+
+  var once: Bool {
+    get {
+      if case .once(let v)? = _storage._trigger {return v}
+      return false
+    }
+    set {_uniqueStorage()._trigger = .once(newValue)}
+  }
+
+  var continuousCheckpointInterval: String {
+    get {
+      if case .continuousCheckpointInterval(let v)? = _storage._trigger {return v}
+      return String()
+    }
+    set {_uniqueStorage()._trigger = .continuousCheckpointInterval(newValue)}
+  }
+
+  var outputMode: String {
+    get {return _storage._outputMode}
+    set {_uniqueStorage()._outputMode = newValue}
+  }
+
+  var queryName: String {
+    get {return _storage._queryName}
+    set {_uniqueStorage()._queryName = newValue}
+  }
+
+  /// The destination is optional. When set, it can be a path or a table name.
+  var sinkDestination: OneOf_SinkDestination? {
+    get {return _storage._sinkDestination}
+    set {_uniqueStorage()._sinkDestination = newValue}
+  }
+
+  var path: String {
+    get {
+      if case .path(let v)? = _storage._sinkDestination {return v}
+      return String()
+    }
+    set {_uniqueStorage()._sinkDestination = .path(newValue)}
+  }
+
+  var tableName: String {
+    get {
+      if case .tableName(let v)? = _storage._sinkDestination {return v}
+      return String()
+    }
+    set {_uniqueStorage()._sinkDestination = .tableName(newValue)}
+  }
+
+  var foreachWriter: Spark_Connect_StreamingForeachFunction {
+    get {return _storage._foreachWriter ?? Spark_Connect_StreamingForeachFunction()}
+    set {_uniqueStorage()._foreachWriter = newValue}
+  }
+  /// Returns true if `foreachWriter` has been explicitly set.
+  var hasForeachWriter: Bool {return _storage._foreachWriter != nil}
+  /// Clears the value of `foreachWriter`. Subsequent reads from it will return its default value.
+  mutating func clearForeachWriter() {_uniqueStorage()._foreachWriter = nil}
+
+  var foreachBatch: Spark_Connect_StreamingForeachFunction {
+    get {return _storage._foreachBatch ?? Spark_Connect_StreamingForeachFunction()}
+    set {_uniqueStorage()._foreachBatch = newValue}
+  }
+  /// Returns true if `foreachBatch` has been explicitly set.
+  var hasForeachBatch: Bool {return _storage._foreachBatch != nil}
+  /// Clears the value of `foreachBatch`. Subsequent reads from it will return its default value.
+  mutating func clearForeachBatch() {_uniqueStorage()._foreachBatch = nil}
+
+  /// (Optional) Columns used for clustering the table.
+  var clusteringColumnNames: [String] {
+    get {return _storage._clusteringColumnNames}
+    set {_uniqueStorage()._clusteringColumnNames = newValue}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OneOf_Trigger: Equatable, Sendable {
+    case processingTimeInterval(String)
+    case availableNow(Bool)
+    case once(Bool)
+    case continuousCheckpointInterval(String)
+
+  }
+
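+  // Illustrative sketch (editorial, not generated code): starting a streaming
+  // write with a processing-time trigger and a path sink via the oneof
+  // accessors above; all values below are hypothetical.
+  //
+  //   var start = Spark_Connect_WriteStreamOperationStart()
+  //   start.format = "parquet"
+  //   start.outputMode = "append"
+  //   start.processingTimeInterval = "10 seconds"  // selects the trigger oneof
+  //   start.path = "/tmp/stream-out"               // selects the sink_destination oneof
+  //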
+  /// The destination is optional. When set, it can be a path or a table name.
+  enum OneOf_SinkDestination: Equatable, Sendable {
+    case path(String)
+    case tableName(String)
+
+  }
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+struct Spark_Connect_StreamingForeachFunction: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  var function: Spark_Connect_StreamingForeachFunction.OneOf_Function? = nil
+
+  var pythonFunction: Spark_Connect_PythonUDF {
+    get {
+      if case .pythonFunction(let v)? = function {return v}
+      return Spark_Connect_PythonUDF()
+    }
+    set {function = .pythonFunction(newValue)}
+  }
+
+  var scalaFunction: Spark_Connect_ScalarScalaUDF {
+    get {
+      if case .scalaFunction(let v)? = function {return v}
+      return Spark_Connect_ScalarScalaUDF()
+    }
+    set {function = .scalaFunction(newValue)}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OneOf_Function: Equatable, Sendable {
+    case pythonFunction(Spark_Connect_PythonUDF)
+    case scalaFunction(Spark_Connect_ScalarScalaUDF)
+
+  }
+
+  init() {}
+}
+
+struct Spark_Connect_WriteStreamOperationStartResult: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Query instance. See `StreamingQueryInstanceId`.
+  var queryID: Spark_Connect_StreamingQueryInstanceId {
+    get {return _queryID ?? Spark_Connect_StreamingQueryInstanceId()}
+    set {_queryID = newValue}
+  }
+  /// Returns true if `queryID` has been explicitly set.
+  var hasQueryID: Bool {return self._queryID != nil}
+  /// Clears the value of `queryID`. Subsequent reads from it will return its default value.
+  mutating func clearQueryID() {self._queryID = nil}
+
+  /// An optional query name.
+  var name: String = String()
+
+  /// Optional query started event if there is any listener registered on the client side.
+  var queryStartedEventJson: String {
+    get {return _queryStartedEventJson ?? String()}
+    set {_queryStartedEventJson = newValue}
+  }
+  /// Returns true if `queryStartedEventJson` has been explicitly set.
+  var hasQueryStartedEventJson: Bool {return self._queryStartedEventJson != nil}
+  /// Clears the value of `queryStartedEventJson`. Subsequent reads from it will return its default value.
+  mutating func clearQueryStartedEventJson() {self._queryStartedEventJson = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _queryID: Spark_Connect_StreamingQueryInstanceId? = nil
+  fileprivate var _queryStartedEventJson: String? = nil
+}
+
+/// A tuple that uniquely identifies an instance of a streaming query run. It consists of `id` that
+/// persists across the streaming runs and `run_id` that changes between each run of the
+/// streaming query that resumes from the checkpoint.
+struct Spark_Connect_StreamingQueryInstanceId: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
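+  // Illustrative values (hypothetical) showing how the two identifiers relate
+  // across a restart from checkpoint:
+  //
+  //   first run:      id = "q-7e1c", runID = "r-0001"
+  //   after restart:  id = "q-7e1c", runID = "r-0002"  // same id, new run_id
+  //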
+ var id: String = String() + + /// (Required) The unique id of this run of the query. That is, every start/restart of a query + /// will generate a unique run_id. Therefore, every time a query is restarted from + /// checkpoint, it will have the same `id` but different `run_id`s. + var runID: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// Commands for a streaming query. +struct Spark_Connect_StreamingQueryCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Query instance. See `StreamingQueryInstanceId`. + var queryID: Spark_Connect_StreamingQueryInstanceId { + get {return _queryID ?? Spark_Connect_StreamingQueryInstanceId()} + set {_queryID = newValue} + } + /// Returns true if `queryID` has been explicitly set. + var hasQueryID: Bool {return self._queryID != nil} + /// Clears the value of `queryID`. Subsequent reads from it will return its default value. + mutating func clearQueryID() {self._queryID = nil} + + /// See documentation for the corresponding API method in StreamingQuery. + var command: Spark_Connect_StreamingQueryCommand.OneOf_Command? = nil + + /// status() API. + var status: Bool { + get { + if case .status(let v)? = command {return v} + return false + } + set {command = .status(newValue)} + } + + /// lastProgress() API. + var lastProgress: Bool { + get { + if case .lastProgress(let v)? = command {return v} + return false + } + set {command = .lastProgress(newValue)} + } + + /// recentProgress() API. + var recentProgress: Bool { + get { + if case .recentProgress(let v)? = command {return v} + return false + } + set {command = .recentProgress(newValue)} + } + + /// stop() API. Stops the query. + var stop: Bool { + get { + if case .stop(let v)? = command {return v} + return false + } + set {command = .stop(newValue)} + } + + /// processAllAvailable() API. Waits till all the available data is processed + var processAllAvailable: Bool { + get { + if case .processAllAvailable(let v)? = command {return v} + return false + } + set {command = .processAllAvailable(newValue)} + } + + /// explain() API. Returns logical and physical plans. + var explain: Spark_Connect_StreamingQueryCommand.ExplainCommand { + get { + if case .explain(let v)? = command {return v} + return Spark_Connect_StreamingQueryCommand.ExplainCommand() + } + set {command = .explain(newValue)} + } + + /// exception() API. Returns the exception in the query if any. + var exception: Bool { + get { + if case .exception(let v)? = command {return v} + return false + } + set {command = .exception(newValue)} + } + + /// awaitTermination() API. Waits for the termination of the query. + var awaitTermination: Spark_Connect_StreamingQueryCommand.AwaitTerminationCommand { + get { + if case .awaitTermination(let v)? = command {return v} + return Spark_Connect_StreamingQueryCommand.AwaitTerminationCommand() + } + set {command = .awaitTermination(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// See documentation for the corresponding API method in StreamingQuery. + enum OneOf_Command: Equatable, Sendable { + /// status() API. + case status(Bool) + /// lastProgress() API. + case lastProgress(Bool) + /// recentProgress() API. + case recentProgress(Bool) + /// stop() API. Stops the query. + case stop(Bool) + /// processAllAvailable() API. 
Waits till all the available data is processed + case processAllAvailable(Bool) + /// explain() API. Returns logical and physical plans. + case explain(Spark_Connect_StreamingQueryCommand.ExplainCommand) + /// exception() API. Returns the exception in the query if any. + case exception(Bool) + /// awaitTermination() API. Waits for the termination of the query. + case awaitTermination(Spark_Connect_StreamingQueryCommand.AwaitTerminationCommand) + + } + + struct ExplainCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// TODO: Consider reusing Explain from AnalyzePlanRequest message. + /// We can not do this right now since it base.proto imports this file. + var extended: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct AwaitTerminationCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var timeoutMs: Int64 { + get {return _timeoutMs ?? 0} + set {_timeoutMs = newValue} + } + /// Returns true if `timeoutMs` has been explicitly set. + var hasTimeoutMs: Bool {return self._timeoutMs != nil} + /// Clears the value of `timeoutMs`. Subsequent reads from it will return its default value. + mutating func clearTimeoutMs() {self._timeoutMs = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _timeoutMs: Int64? = nil + } + + init() {} + + fileprivate var _queryID: Spark_Connect_StreamingQueryInstanceId? = nil +} + +/// Response for commands on a streaming query. +struct Spark_Connect_StreamingQueryCommandResult: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Query instance id. See `StreamingQueryInstanceId`. + var queryID: Spark_Connect_StreamingQueryInstanceId { + get {return _queryID ?? Spark_Connect_StreamingQueryInstanceId()} + set {_queryID = newValue} + } + /// Returns true if `queryID` has been explicitly set. + var hasQueryID: Bool {return self._queryID != nil} + /// Clears the value of `queryID`. Subsequent reads from it will return its default value. + mutating func clearQueryID() {self._queryID = nil} + + var resultType: Spark_Connect_StreamingQueryCommandResult.OneOf_ResultType? = nil + + var status: Spark_Connect_StreamingQueryCommandResult.StatusResult { + get { + if case .status(let v)? = resultType {return v} + return Spark_Connect_StreamingQueryCommandResult.StatusResult() + } + set {resultType = .status(newValue)} + } + + var recentProgress: Spark_Connect_StreamingQueryCommandResult.RecentProgressResult { + get { + if case .recentProgress(let v)? = resultType {return v} + return Spark_Connect_StreamingQueryCommandResult.RecentProgressResult() + } + set {resultType = .recentProgress(newValue)} + } + + var explain: Spark_Connect_StreamingQueryCommandResult.ExplainResult { + get { + if case .explain(let v)? = resultType {return v} + return Spark_Connect_StreamingQueryCommandResult.ExplainResult() + } + set {resultType = .explain(newValue)} + } + + var exception: Spark_Connect_StreamingQueryCommandResult.ExceptionResult { + get { + if case .exception(let v)? 
+      return Spark_Connect_StreamingQueryCommandResult.ExceptionResult()
+    }
+    set {resultType = .exception(newValue)}
+  }
+
+  var awaitTermination: Spark_Connect_StreamingQueryCommandResult.AwaitTerminationResult {
+    get {
+      if case .awaitTermination(let v)? = resultType {return v}
+      return Spark_Connect_StreamingQueryCommandResult.AwaitTerminationResult()
+    }
+    set {resultType = .awaitTermination(newValue)}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OneOf_ResultType: Equatable, Sendable {
+    case status(Spark_Connect_StreamingQueryCommandResult.StatusResult)
+    case recentProgress(Spark_Connect_StreamingQueryCommandResult.RecentProgressResult)
+    case explain(Spark_Connect_StreamingQueryCommandResult.ExplainResult)
+    case exception(Spark_Connect_StreamingQueryCommandResult.ExceptionResult)
+    case awaitTermination(Spark_Connect_StreamingQueryCommandResult.AwaitTerminationResult)
+
+  }
+
+  struct StatusResult: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// See documentation for these fields in the Scala 'StreamingQueryStatus' struct
+    var statusMessage: String = String()
+
+    var isDataAvailable: Bool = false
+
+    var isTriggerActive: Bool = false
+
+    var isActive: Bool = false
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  struct RecentProgressResult: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// Progress reports as an array of json strings.
+    var recentProgressJson: [String] = []
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  struct ExplainResult: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// Logical and physical plans as string
+    var result: String = String()
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  struct ExceptionResult: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Optional) Exception message as string, maps to the return value of the original
+    /// StreamingQueryException's toString method
+    var exceptionMessage: String {
+      get {return _exceptionMessage ?? String()}
+      set {_exceptionMessage = newValue}
+    }
+    /// Returns true if `exceptionMessage` has been explicitly set.
+    var hasExceptionMessage: Bool {return self._exceptionMessage != nil}
+    /// Clears the value of `exceptionMessage`. Subsequent reads from it will return its default value.
+    mutating func clearExceptionMessage() {self._exceptionMessage = nil}
+
+    /// (Optional) Exception error class as string
+    var errorClass: String {
+      get {return _errorClass ?? String()}
+      set {_errorClass = newValue}
+    }
+    /// Returns true if `errorClass` has been explicitly set.
+    var hasErrorClass: Bool {return self._errorClass != nil}
+    /// Clears the value of `errorClass`. Subsequent reads from it will return its default value.
+ mutating func clearErrorClass() {self._errorClass = nil} + + /// (Optional) Exception stack trace as string + var stackTrace: String { + get {return _stackTrace ?? String()} + set {_stackTrace = newValue} + } + /// Returns true if `stackTrace` has been explicitly set. + var hasStackTrace: Bool {return self._stackTrace != nil} + /// Clears the value of `stackTrace`. Subsequent reads from it will return its default value. + mutating func clearStackTrace() {self._stackTrace = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _exceptionMessage: String? = nil + fileprivate var _errorClass: String? = nil + fileprivate var _stackTrace: String? = nil + } + + struct AwaitTerminationResult: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var terminated: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} + + fileprivate var _queryID: Spark_Connect_StreamingQueryInstanceId? = nil +} + +/// Commands for the streaming query manager. +struct Spark_Connect_StreamingQueryManagerCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// See documentation for the corresponding API method in StreamingQueryManager. + var command: Spark_Connect_StreamingQueryManagerCommand.OneOf_Command? = nil + + /// active() API, returns a list of active queries. + var active: Bool { + get { + if case .active(let v)? = command {return v} + return false + } + set {command = .active(newValue)} + } + + /// get() API, returns the StreamingQuery identified by id. + var getQuery: String { + get { + if case .getQuery(let v)? = command {return v} + return String() + } + set {command = .getQuery(newValue)} + } + + /// awaitAnyTermination() API, wait until any query terminates or timeout. + var awaitAnyTermination: Spark_Connect_StreamingQueryManagerCommand.AwaitAnyTerminationCommand { + get { + if case .awaitAnyTermination(let v)? = command {return v} + return Spark_Connect_StreamingQueryManagerCommand.AwaitAnyTerminationCommand() + } + set {command = .awaitAnyTermination(newValue)} + } + + /// resetTerminated() API. + var resetTerminated: Bool { + get { + if case .resetTerminated(let v)? = command {return v} + return false + } + set {command = .resetTerminated(newValue)} + } + + /// addListener API. + var addListener: Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand { + get { + if case .addListener(let v)? = command {return v} + return Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand() + } + set {command = .addListener(newValue)} + } + + /// removeListener API. + var removeListener: Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand { + get { + if case .removeListener(let v)? = command {return v} + return Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand() + } + set {command = .removeListener(newValue)} + } + + /// listListeners() API, returns a list of streaming query listeners. + var listListeners: Bool { + get { + if case .listListeners(let v)? 
= command {return v} + return false + } + set {command = .listListeners(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// See documentation for the corresponding API method in StreamingQueryManager. + enum OneOf_Command: Equatable, Sendable { + /// active() API, returns a list of active queries. + case active(Bool) + /// get() API, returns the StreamingQuery identified by id. + case getQuery(String) + /// awaitAnyTermination() API, wait until any query terminates or timeout. + case awaitAnyTermination(Spark_Connect_StreamingQueryManagerCommand.AwaitAnyTerminationCommand) + /// resetTerminated() API. + case resetTerminated(Bool) + /// addListener API. + case addListener(Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand) + /// removeListener API. + case removeListener(Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand) + /// listListeners() API, returns a list of streaming query listeners. + case listListeners(Bool) + + } + + struct AwaitAnyTerminationCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) The waiting time in milliseconds to wait for any query to terminate. + var timeoutMs: Int64 { + get {return _timeoutMs ?? 0} + set {_timeoutMs = newValue} + } + /// Returns true if `timeoutMs` has been explicitly set. + var hasTimeoutMs: Bool {return self._timeoutMs != nil} + /// Clears the value of `timeoutMs`. Subsequent reads from it will return its default value. + mutating func clearTimeoutMs() {self._timeoutMs = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _timeoutMs: Int64? = nil + } + + struct StreamingQueryListenerCommand: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var listenerPayload: Data = Data() + + var pythonListenerPayload: Spark_Connect_PythonUDF { + get {return _pythonListenerPayload ?? Spark_Connect_PythonUDF()} + set {_pythonListenerPayload = newValue} + } + /// Returns true if `pythonListenerPayload` has been explicitly set. + var hasPythonListenerPayload: Bool {return self._pythonListenerPayload != nil} + /// Clears the value of `pythonListenerPayload`. Subsequent reads from it will return its default value. + mutating func clearPythonListenerPayload() {self._pythonListenerPayload = nil} + + var id: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _pythonListenerPayload: Spark_Connect_PythonUDF? = nil + } + + init() {} +} + +/// Response for commands on the streaming query manager. +struct Spark_Connect_StreamingQueryManagerCommandResult: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var resultType: Spark_Connect_StreamingQueryManagerCommandResult.OneOf_ResultType? = nil + + var active: Spark_Connect_StreamingQueryManagerCommandResult.ActiveResult { + get { + if case .active(let v)? 
= resultType {return v} + return Spark_Connect_StreamingQueryManagerCommandResult.ActiveResult() + } + set {resultType = .active(newValue)} + } + + var query: Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance { + get { + if case .query(let v)? = resultType {return v} + return Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance() + } + set {resultType = .query(newValue)} + } + + var awaitAnyTermination: Spark_Connect_StreamingQueryManagerCommandResult.AwaitAnyTerminationResult { + get { + if case .awaitAnyTermination(let v)? = resultType {return v} + return Spark_Connect_StreamingQueryManagerCommandResult.AwaitAnyTerminationResult() + } + set {resultType = .awaitAnyTermination(newValue)} + } + + var resetTerminated: Bool { + get { + if case .resetTerminated(let v)? = resultType {return v} + return false + } + set {resultType = .resetTerminated(newValue)} + } + + var addListener: Bool { + get { + if case .addListener(let v)? = resultType {return v} + return false + } + set {resultType = .addListener(newValue)} + } + + var removeListener: Bool { + get { + if case .removeListener(let v)? = resultType {return v} + return false + } + set {resultType = .removeListener(newValue)} + } + + var listListeners: Spark_Connect_StreamingQueryManagerCommandResult.ListStreamingQueryListenerResult { + get { + if case .listListeners(let v)? = resultType {return v} + return Spark_Connect_StreamingQueryManagerCommandResult.ListStreamingQueryListenerResult() + } + set {resultType = .listListeners(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_ResultType: Equatable, Sendable { + case active(Spark_Connect_StreamingQueryManagerCommandResult.ActiveResult) + case query(Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance) + case awaitAnyTermination(Spark_Connect_StreamingQueryManagerCommandResult.AwaitAnyTerminationResult) + case resetTerminated(Bool) + case addListener(Bool) + case removeListener(Bool) + case listListeners(Spark_Connect_StreamingQueryManagerCommandResult.ListStreamingQueryListenerResult) + + } + + struct ActiveResult: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var activeQueries: [Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct StreamingQueryInstance: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The id and runId of this query. + var id: Spark_Connect_StreamingQueryInstanceId { + get {return _id ?? Spark_Connect_StreamingQueryInstanceId()} + set {_id = newValue} + } + /// Returns true if `id` has been explicitly set. + var hasID: Bool {return self._id != nil} + /// Clears the value of `id`. Subsequent reads from it will return its default value. + mutating func clearID() {self._id = nil} + + /// (Optional) The name of this query. + var name: String { + get {return _name ?? String()} + set {_name = newValue} + } + /// Returns true if `name` has been explicitly set. + var hasName: Bool {return self._name != nil} + /// Clears the value of `name`. Subsequent reads from it will return its default value. 
+ mutating func clearName() {self._name = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _id: Spark_Connect_StreamingQueryInstanceId? = nil + fileprivate var _name: String? = nil + } + + struct AwaitAnyTerminationResult: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var terminated: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct StreamingQueryListenerInstance: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var listenerPayload: Data = Data() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct ListStreamingQueryListenerResult: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Reference IDs of listener instances. + var listenerIds: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} +} + +/// The protocol for client-side StreamingQueryListener. +/// This command will only be set when either the first listener is added to the client, or the last +/// listener is removed from the client. +/// The add_listener_bus_listener command will only be set true in the first case. +/// The remove_listener_bus_listener command will only be set true in the second case. +struct Spark_Connect_StreamingQueryListenerBusCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var command: Spark_Connect_StreamingQueryListenerBusCommand.OneOf_Command? = nil + + var addListenerBusListener: Bool { + get { + if case .addListenerBusListener(let v)? = command {return v} + return false + } + set {command = .addListenerBusListener(newValue)} + } + + var removeListenerBusListener: Bool { + get { + if case .removeListenerBusListener(let v)? = command {return v} + return false + } + set {command = .removeListenerBusListener(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_Command: Equatable, Sendable { + case addListenerBusListener(Bool) + case removeListenerBusListener(Bool) + + } + + init() {} +} + +/// The protocol for the returned events in the long-running response channel. +struct Spark_Connect_StreamingQueryListenerEvent: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+
+  /// (Required) The json serialized event, all StreamingQueryListener events have a json method
+  var eventJson: String = String()
+
+  /// (Required) Query event type used by client to decide how to deserialize the event_json
+  var eventType: Spark_Connect_StreamingQueryEventType = .queryProgressUnspecified
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+struct Spark_Connect_StreamingQueryListenerEventsResult: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  var events: [Spark_Connect_StreamingQueryListenerEvent] = []
+
+  var listenerBusListenerAdded: Bool {
+    get {return _listenerBusListenerAdded ?? false}
+    set {_listenerBusListenerAdded = newValue}
+  }
+  /// Returns true if `listenerBusListenerAdded` has been explicitly set.
+  var hasListenerBusListenerAdded: Bool {return self._listenerBusListenerAdded != nil}
+  /// Clears the value of `listenerBusListenerAdded`. Subsequent reads from it will return its default value.
+  mutating func clearListenerBusListenerAdded() {self._listenerBusListenerAdded = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _listenerBusListenerAdded: Bool? = nil
+}
+
+/// Command to get the output of 'SparkContext.resources'
+struct Spark_Connect_GetResourcesCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+/// Response for command 'GetResourcesCommand'.
+struct Spark_Connect_GetResourcesCommandResult: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  var resources: Dictionary<String, Spark_Connect_ResourceInformation> = [:]
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+/// Command to create ResourceProfile
+struct Spark_Connect_CreateResourceProfileCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The ResourceProfile to be built on the server-side.
+  var profile: Spark_Connect_ResourceProfile {
+    get {return _profile ?? Spark_Connect_ResourceProfile()}
+    set {_profile = newValue}
+  }
+  /// Returns true if `profile` has been explicitly set.
+  var hasProfile: Bool {return self._profile != nil}
+  /// Clears the value of `profile`. Subsequent reads from it will return its default value.
+  mutating func clearProfile() {self._profile = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _profile: Spark_Connect_ResourceProfile? = nil
+}
+
+/// Response for command 'CreateResourceProfileCommand'.
+struct Spark_Connect_CreateResourceProfileCommandResult: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Server-side generated resource profile id.
+  var profileID: Int32 = 0
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+/// Command to remove `CachedRemoteRelation`
+struct Spark_Connect_RemoveCachedRemoteRelationCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The cached remote relation to be removed
+  var relation: Spark_Connect_CachedRemoteRelation {
+    get {return _relation ?? Spark_Connect_CachedRemoteRelation()}
+    set {_relation = newValue}
+  }
+  /// Returns true if `relation` has been explicitly set.
+  var hasRelation: Bool {return self._relation != nil}
+  /// Clears the value of `relation`. Subsequent reads from it will return its default value.
+  mutating func clearRelation() {self._relation = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _relation: Spark_Connect_CachedRemoteRelation? = nil
+}
+
+struct Spark_Connect_CheckpointCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The logical plan to checkpoint.
+  var relation: Spark_Connect_Relation {
+    get {return _relation ?? Spark_Connect_Relation()}
+    set {_relation = newValue}
+  }
+  /// Returns true if `relation` has been explicitly set.
+  var hasRelation: Bool {return self._relation != nil}
+  /// Clears the value of `relation`. Subsequent reads from it will return its default value.
+  mutating func clearRelation() {self._relation = nil}
+
+  /// (Required) Whether to locally checkpoint using a local temporary
+  /// directory in the Spark Connect server (Spark Driver)
+  var local: Bool = false
+
+  /// (Required) Whether to checkpoint this dataframe immediately.
+  var eager: Bool = false
+
+  /// (Optional) For local checkpoint, the storage level to use.
+  var storageLevel: Spark_Connect_StorageLevel {
+    get {return _storageLevel ?? Spark_Connect_StorageLevel()}
+    set {_storageLevel = newValue}
+  }
+  /// Returns true if `storageLevel` has been explicitly set.
+  var hasStorageLevel: Bool {return self._storageLevel != nil}
+  /// Clears the value of `storageLevel`. Subsequent reads from it will return its default value.
+  mutating func clearStorageLevel() {self._storageLevel = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _relation: Spark_Connect_Relation? = nil
+  fileprivate var _storageLevel: Spark_Connect_StorageLevel? = nil
+}
+
+struct Spark_Connect_MergeIntoTableCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The name of the target table.
+  var targetTableName: String = String()
+
+  /// (Required) The relation of the source table.
+  var sourceTablePlan: Spark_Connect_Relation {
+    get {return _sourceTablePlan ?? Spark_Connect_Relation()}
+    set {_sourceTablePlan = newValue}
+  }
+  /// Returns true if `sourceTablePlan` has been explicitly set.
+  var hasSourceTablePlan: Bool {return self._sourceTablePlan != nil}
+  /// Clears the value of `sourceTablePlan`. Subsequent reads from it will return its default value.
+  mutating func clearSourceTablePlan() {self._sourceTablePlan = nil}
+
+  /// (Required) The condition to match the source and target.
+  var mergeCondition: Spark_Connect_Expression {
+    get {return _mergeCondition ?? Spark_Connect_Expression()}
+    set {_mergeCondition = newValue}
+  }
+  /// Returns true if `mergeCondition` has been explicitly set.
+  var hasMergeCondition: Bool {return self._mergeCondition != nil}
+  /// Clears the value of `mergeCondition`. Subsequent reads from it will return its default value.
+  mutating func clearMergeCondition() {self._mergeCondition = nil}
+
+  /// (Optional) The actions to be taken when the condition is matched.
+  var matchActions: [Spark_Connect_Expression] = []
+
+  /// (Optional) The actions to be taken when the condition is not matched.
+  var notMatchedActions: [Spark_Connect_Expression] = []
+
+  /// (Optional) The actions to be taken when the condition is not matched by source.
+  var notMatchedBySourceActions: [Spark_Connect_Expression] = []
+
+  /// (Required) Whether to enable schema evolution.
+  var withSchemaEvolution: Bool = false
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _sourceTablePlan: Spark_Connect_Relation? = nil
+  fileprivate var _mergeCondition: Spark_Connect_Expression? = nil
+}
+
+/// Execute an arbitrary string command inside an external execution engine
+struct Spark_Connect_ExecuteExternalCommand: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The class name of the runner that implements `ExternalCommandRunner`
+  var runner: String = String()
+
+  /// (Required) The target command to be executed.
+  var command: String = String()
+
+  /// (Optional) The options for the runner.
+  var options: Dictionary<String, String> = [:]
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+// MARK: - Code below here is support for the SwiftProtobuf runtime.
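+
+// MARK: - Example usage (editor's illustrative sketch, not generated code)
+//
+// A minimal sketch of how a client might combine the message types above to
+// stop a running streaming query, assuming the `id`/`runId` strings were
+// obtained from a previously started query; the placeholder values below are
+// hypothetical.
+//
+//     var queryId = Spark_Connect_StreamingQueryInstanceId()
+//     queryId.id = "<query-id>"   // hypothetical value
+//     queryId.runID = "<run-id>"  // hypothetical value
+//
+//     var queryCommand = Spark_Connect_StreamingQueryCommand()
+//     queryCommand.queryID = queryId
+//     queryCommand.stop = true    // sets the oneof to `.stop(true)`
+//
+//     var command = Spark_Connect_Command()
+//     command.streamingQueryCommand = queryCommand
+//     // `command` would then typically be embedded in a `Plan` and sent to the
+//     // server via `SparkConnectService.ExecutePlan`.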
+ +fileprivate let _protobuf_package = "spark.connect" + +extension Spark_Connect_StreamingQueryEventType: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "QUERY_PROGRESS_UNSPECIFIED"), + 1: .same(proto: "QUERY_PROGRESS_EVENT"), + 2: .same(proto: "QUERY_TERMINATED_EVENT"), + 3: .same(proto: "QUERY_IDLE_EVENT"), + ] +} + +extension Spark_Connect_Command: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Command" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "register_function"), + 2: .standard(proto: "write_operation"), + 3: .standard(proto: "create_dataframe_view"), + 4: .standard(proto: "write_operation_v2"), + 5: .standard(proto: "sql_command"), + 6: .standard(proto: "write_stream_operation_start"), + 7: .standard(proto: "streaming_query_command"), + 8: .standard(proto: "get_resources_command"), + 9: .standard(proto: "streaming_query_manager_command"), + 10: .standard(proto: "register_table_function"), + 11: .standard(proto: "streaming_query_listener_bus_command"), + 12: .standard(proto: "register_data_source"), + 13: .standard(proto: "create_resource_profile_command"), + 14: .standard(proto: "checkpoint_command"), + 15: .standard(proto: "remove_cached_remote_relation_command"), + 16: .standard(proto: "merge_into_table_command"), + 17: .standard(proto: "ml_command"), + 18: .standard(proto: "execute_external_command"), + 999: .same(proto: "extension"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_CommonInlineUserDefinedFunction? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .registerFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .registerFunction(v) + } + }() + case 2: try { + var v: Spark_Connect_WriteOperation? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .writeOperation(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .writeOperation(v) + } + }() + case 3: try { + var v: Spark_Connect_CreateDataFrameViewCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .createDataframeView(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .createDataframeView(v) + } + }() + case 4: try { + var v: Spark_Connect_WriteOperationV2? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .writeOperationV2(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .writeOperationV2(v) + } + }() + case 5: try { + var v: Spark_Connect_SqlCommand? 
+ var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .sqlCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .sqlCommand(v) + } + }() + case 6: try { + var v: Spark_Connect_WriteStreamOperationStart? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .writeStreamOperationStart(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .writeStreamOperationStart(v) + } + }() + case 7: try { + var v: Spark_Connect_StreamingQueryCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .streamingQueryCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .streamingQueryCommand(v) + } + }() + case 8: try { + var v: Spark_Connect_GetResourcesCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .getResourcesCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .getResourcesCommand(v) + } + }() + case 9: try { + var v: Spark_Connect_StreamingQueryManagerCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .streamingQueryManagerCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .streamingQueryManagerCommand(v) + } + }() + case 10: try { + var v: Spark_Connect_CommonInlineUserDefinedTableFunction? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .registerTableFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .registerTableFunction(v) + } + }() + case 11: try { + var v: Spark_Connect_StreamingQueryListenerBusCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .streamingQueryListenerBusCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .streamingQueryListenerBusCommand(v) + } + }() + case 12: try { + var v: Spark_Connect_CommonInlineUserDefinedDataSource? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .registerDataSource(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .registerDataSource(v) + } + }() + case 13: try { + var v: Spark_Connect_CreateResourceProfileCommand? 
+ var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .createResourceProfileCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .createResourceProfileCommand(v) + } + }() + case 14: try { + var v: Spark_Connect_CheckpointCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .checkpointCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .checkpointCommand(v) + } + }() + case 15: try { + var v: Spark_Connect_RemoveCachedRemoteRelationCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .removeCachedRemoteRelationCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .removeCachedRemoteRelationCommand(v) + } + }() + case 16: try { + var v: Spark_Connect_MergeIntoTableCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .mergeIntoTableCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .mergeIntoTableCommand(v) + } + }() + case 17: try { + var v: Spark_Connect_MlCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .mlCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .mlCommand(v) + } + }() + case 18: try { + var v: Spark_Connect_ExecuteExternalCommand? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .executeExternalCommand(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .executeExternalCommand(v) + } + }() + case 999: try { + var v: SwiftProtobuf.Google_Protobuf_Any? + var hadOneofValue = false + if let current = self.commandType { + hadOneofValue = true + if case .extension(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.commandType = .extension(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.commandType { + case .registerFunction?: try { + guard case .registerFunction(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .writeOperation?: try { + guard case .writeOperation(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .createDataframeView?: try { + guard case .createDataframeView(let v)? 
= self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .writeOperationV2?: try { + guard case .writeOperationV2(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .sqlCommand?: try { + guard case .sqlCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .writeStreamOperationStart?: try { + guard case .writeStreamOperationStart(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .streamingQueryCommand?: try { + guard case .streamingQueryCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case .getResourcesCommand?: try { + guard case .getResourcesCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + }() + case .streamingQueryManagerCommand?: try { + guard case .streamingQueryManagerCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case .registerTableFunction?: try { + guard case .registerTableFunction(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 10) + }() + case .streamingQueryListenerBusCommand?: try { + guard case .streamingQueryListenerBusCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 11) + }() + case .registerDataSource?: try { + guard case .registerDataSource(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 12) + }() + case .createResourceProfileCommand?: try { + guard case .createResourceProfileCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 13) + }() + case .checkpointCommand?: try { + guard case .checkpointCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + }() + case .removeCachedRemoteRelationCommand?: try { + guard case .removeCachedRemoteRelationCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 15) + }() + case .mergeIntoTableCommand?: try { + guard case .mergeIntoTableCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 16) + }() + case .mlCommand?: try { + guard case .mlCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 17) + }() + case .executeExternalCommand?: try { + guard case .executeExternalCommand(let v)? = self.commandType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 18) + }() + case .extension?: try { + guard case .extension(let v)? 
= self.commandType else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 999)
+    }()
+    case nil: break
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_Command, rhs: Spark_Connect_Command) -> Bool {
+    if lhs.commandType != rhs.commandType {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_SqlCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".SqlCommand"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "sql"),
+    2: .same(proto: "args"),
+    3: .standard(proto: "pos_args"),
+    4: .standard(proto: "named_arguments"),
+    5: .standard(proto: "pos_arguments"),
+    6: .same(proto: "input"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.sql) }()
+      case 2: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap<SwiftProtobuf.ProtobufString,Spark_Connect_Expression.Literal>.self, value: &self.args) }()
+      case 3: try { try decoder.decodeRepeatedMessageField(value: &self.posArgs) }()
+      case 4: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap<SwiftProtobuf.ProtobufString,Spark_Connect_Expression>.self, value: &self.namedArguments) }()
+      case 5: try { try decoder.decodeRepeatedMessageField(value: &self.posArguments) }()
+      case 6: try { try decoder.decodeSingularMessageField(value: &self._input) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if !self.sql.isEmpty {
+      try visitor.visitSingularStringField(value: self.sql, fieldNumber: 1)
+    }
+    if !self.args.isEmpty {
+      try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap<SwiftProtobuf.ProtobufString,Spark_Connect_Expression.Literal>.self, value: self.args, fieldNumber: 2)
+    }
+    if !self.posArgs.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.posArgs, fieldNumber: 3)
+    }
+    if !self.namedArguments.isEmpty {
+      try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap<SwiftProtobuf.ProtobufString,Spark_Connect_Expression>.self, value: self.namedArguments, fieldNumber: 4)
+    }
+    if !self.posArguments.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.posArguments, fieldNumber: 5)
+    }
+    try { if let v = self._input {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 6)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_SqlCommand, rhs: Spark_Connect_SqlCommand) -> Bool {
+    if lhs.sql != rhs.sql {return false}
+    if lhs.args != rhs.args {return false}
+    if lhs.posArgs != rhs.posArgs {return false}
+    if lhs.namedArguments != rhs.namedArguments {return false}
+    if lhs.posArguments != rhs.posArguments {return false}
+    if lhs._input != rhs._input {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_CreateDataFrameViewCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".CreateDataFrameViewCommand"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "name"),
+    3: .standard(proto: "is_global"),
+    4: .same(proto: "replace"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._input) }()
+      case 2: try { try decoder.decodeSingularStringField(value: &self.name) }()
+      case 3: try { try decoder.decodeSingularBoolField(value: &self.isGlobal) }()
+      case 4: try { try decoder.decodeSingularBoolField(value: &self.replace) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 2) + } + if self.isGlobal != false { + try visitor.visitSingularBoolField(value: self.isGlobal, fieldNumber: 3) + } + if self.replace != false { + try visitor.visitSingularBoolField(value: self.replace, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CreateDataFrameViewCommand, rhs: Spark_Connect_CreateDataFrameViewCommand) -> Bool { + if lhs._input != rhs._input {return false} + if lhs.name != rhs.name {return false} + if lhs.isGlobal != rhs.isGlobal {return false} + if lhs.replace != rhs.replace {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WriteOperation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WriteOperation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "source"), + 3: .same(proto: "path"), + 4: .same(proto: "table"), + 5: .same(proto: "mode"), + 6: .standard(proto: "sort_column_names"), + 7: .standard(proto: "partitioning_columns"), + 8: .standard(proto: "bucket_by"), + 9: .same(proto: "options"), + 10: .standard(proto: "clustering_columns"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._source) }() + case 3: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if self.saveType != nil {try decoder.handleConflictingOneOf()} + self.saveType = .path(v) + } + }() + case 4: try { + var v: Spark_Connect_WriteOperation.SaveTable? + var hadOneofValue = false + if let current = self.saveType { + hadOneofValue = true + if case .table(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.saveType = .table(v) + } + }() + case 5: try { try decoder.decodeSingularEnumField(value: &self.mode) }() + case 6: try { try decoder.decodeRepeatedStringField(value: &self.sortColumnNames) }() + case 7: try { try decoder.decodeRepeatedStringField(value: &self.partitioningColumns) }() + case 8: try { try decoder.decodeSingularMessageField(value: &self._bucketBy) }() + case 9: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.options) }() + case 10: try { try decoder.decodeRepeatedStringField(value: &self.clusteringColumns) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._source { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + switch self.saveType { + case .path?: try { + guard case .path(let v)? = self.saveType else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + }() + case .table?: try { + guard case .table(let v)? = self.saveType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case nil: break + } + if self.mode != .unspecified { + try visitor.visitSingularEnumField(value: self.mode, fieldNumber: 5) + } + if !self.sortColumnNames.isEmpty { + try visitor.visitRepeatedStringField(value: self.sortColumnNames, fieldNumber: 6) + } + if !self.partitioningColumns.isEmpty { + try visitor.visitRepeatedStringField(value: self.partitioningColumns, fieldNumber: 7) + } + try { if let v = self._bucketBy { + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + } }() + if !self.options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.options, fieldNumber: 9) + } + if !self.clusteringColumns.isEmpty { + try visitor.visitRepeatedStringField(value: self.clusteringColumns, fieldNumber: 10) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WriteOperation, rhs: Spark_Connect_WriteOperation) -> Bool { + if lhs._input != rhs._input {return false} + if lhs._source != rhs._source {return false} + if lhs.saveType != rhs.saveType {return false} + if lhs.mode != rhs.mode {return false} + if lhs.sortColumnNames != rhs.sortColumnNames {return false} + if lhs.partitioningColumns != rhs.partitioningColumns {return false} + if lhs._bucketBy != rhs._bucketBy {return false} + if lhs.options != rhs.options {return false} + if lhs.clusteringColumns != rhs.clusteringColumns {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WriteOperation.SaveMode: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SAVE_MODE_UNSPECIFIED"), + 1: .same(proto: "SAVE_MODE_APPEND"), + 2: .same(proto: "SAVE_MODE_OVERWRITE"), + 3: .same(proto: "SAVE_MODE_ERROR_IF_EXISTS"), + 4: .same(proto: "SAVE_MODE_IGNORE"), + ] +} + +extension Spark_Connect_WriteOperation.SaveTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_WriteOperation.protoMessageName + ".SaveTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "table_name"), + 2: .standard(proto: "save_method"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 2: try { try decoder.decodeSingularEnumField(value: &self.saveMethod) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 1) + } + if self.saveMethod != .unspecified { + try visitor.visitSingularEnumField(value: self.saveMethod, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WriteOperation.SaveTable, rhs: Spark_Connect_WriteOperation.SaveTable) -> Bool { + if lhs.tableName != rhs.tableName {return false} + if lhs.saveMethod != rhs.saveMethod {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WriteOperation.SaveTable.TableSaveMethod: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "TABLE_SAVE_METHOD_UNSPECIFIED"), + 1: .same(proto: "TABLE_SAVE_METHOD_SAVE_AS_TABLE"), + 2: .same(proto: "TABLE_SAVE_METHOD_INSERT_INTO"), + ] +} + +extension Spark_Connect_WriteOperation.BucketBy: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_WriteOperation.protoMessageName + ".BucketBy" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "bucket_column_names"), + 2: .standard(proto: "num_buckets"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.bucketColumnNames) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self.numBuckets) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.bucketColumnNames.isEmpty { + try visitor.visitRepeatedStringField(value: self.bucketColumnNames, fieldNumber: 1) + } + if self.numBuckets != 0 { + try visitor.visitSingularInt32Field(value: self.numBuckets, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WriteOperation.BucketBy, rhs: Spark_Connect_WriteOperation.BucketBy) -> Bool { + if lhs.bucketColumnNames != rhs.bucketColumnNames {return false} + if lhs.numBuckets != rhs.numBuckets {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WriteOperationV2: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WriteOperationV2" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "table_name"), + 3: .same(proto: "provider"), + 4: .standard(proto: "partitioning_columns"), + 5: .same(proto: "options"), + 6: .standard(proto: "table_properties"), + 7: .same(proto: "mode"), + 8: .standard(proto: "overwrite_condition"), + 9: .standard(proto: "clustering_columns"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.tableName) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._provider) }() + case 4: try { try decoder.decodeRepeatedMessageField(value: &self.partitioningColumns) }() + case 5: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.options) }() + case 6: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.tableProperties) }() + case 7: try { try decoder.decodeSingularEnumField(value: &self.mode) }() + case 8: try { try decoder.decodeSingularMessageField(value: &self._overwriteCondition) }() + case 9: try { try decoder.decodeRepeatedStringField(value: &self.clusteringColumns) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.tableName.isEmpty { + try visitor.visitSingularStringField(value: self.tableName, fieldNumber: 2) + } + try { if let v = self._provider { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + if !self.partitioningColumns.isEmpty { + try visitor.visitRepeatedMessageField(value: self.partitioningColumns, fieldNumber: 4) + } + if !self.options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.options, fieldNumber: 5) + } + if !self.tableProperties.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.tableProperties, fieldNumber: 6) + } + if self.mode != .unspecified { + try visitor.visitSingularEnumField(value: self.mode, fieldNumber: 7) + } + try { if let v = self._overwriteCondition { + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + } }() + if !self.clusteringColumns.isEmpty { + try visitor.visitRepeatedStringField(value: self.clusteringColumns, fieldNumber: 9) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WriteOperationV2, rhs: Spark_Connect_WriteOperationV2) -> Bool { + if lhs._input != rhs._input {return false} + if lhs.tableName != rhs.tableName {return false} + if lhs._provider != rhs._provider {return false} + if lhs.partitioningColumns != rhs.partitioningColumns {return false} + if lhs.options != rhs.options {return false} + if lhs.tableProperties != rhs.tableProperties {return false} + if lhs.mode != rhs.mode {return false} + if lhs._overwriteCondition != rhs._overwriteCondition {return false} + if lhs.clusteringColumns != rhs.clusteringColumns {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WriteOperationV2.Mode: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "MODE_UNSPECIFIED"), + 1: .same(proto: "MODE_CREATE"), + 2: .same(proto: "MODE_OVERWRITE"), + 3: .same(proto: "MODE_OVERWRITE_PARTITIONS"), + 4: .same(proto: "MODE_APPEND"), + 5: .same(proto: "MODE_REPLACE"), + 6: .same(proto: "MODE_CREATE_OR_REPLACE"), + ] +} + +extension Spark_Connect_WriteStreamOperationStart: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WriteStreamOperationStart" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "format"), + 3: .same(proto: "options"), + 4: .standard(proto: "partitioning_column_names"), + 5: .standard(proto: "processing_time_interval"), + 6: .standard(proto: "available_now"), + 7: .same(proto: "once"), + 8: .standard(proto: "continuous_checkpoint_interval"), + 9: .standard(proto: "output_mode"), + 10: .standard(proto: "query_name"), + 11: .same(proto: "path"), + 12: .standard(proto: "table_name"), + 13: .standard(proto: "foreach_writer"), + 14: .standard(proto: "foreach_batch"), + 15: .standard(proto: "clustering_column_names"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? 
= nil + var _format: String = String() + var _options: Dictionary = [:] + var _partitioningColumnNames: [String] = [] + var _trigger: Spark_Connect_WriteStreamOperationStart.OneOf_Trigger? + var _outputMode: String = String() + var _queryName: String = String() + var _sinkDestination: Spark_Connect_WriteStreamOperationStart.OneOf_SinkDestination? + var _foreachWriter: Spark_Connect_StreamingForeachFunction? = nil + var _foreachBatch: Spark_Connect_StreamingForeachFunction? = nil + var _clusteringColumnNames: [String] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _format = source._format + _options = source._options + _partitioningColumnNames = source._partitioningColumnNames + _trigger = source._trigger + _outputMode = source._outputMode + _queryName = source._queryName + _sinkDestination = source._sinkDestination + _foreachWriter = source._foreachWriter + _foreachBatch = source._foreachBatch + _clusteringColumnNames = source._clusteringColumnNames + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._format) }() + case 3: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &_storage._options) }() + case 4: try { try decoder.decodeRepeatedStringField(value: &_storage._partitioningColumnNames) }() + case 5: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if _storage._trigger != nil {try decoder.handleConflictingOneOf()} + _storage._trigger = .processingTimeInterval(v) + } + }() + case 6: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if _storage._trigger != nil {try decoder.handleConflictingOneOf()} + _storage._trigger = .availableNow(v) + } + }() + case 7: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if _storage._trigger != nil {try decoder.handleConflictingOneOf()} + _storage._trigger = .once(v) + } + }() + case 8: try { + var v: String? 
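+        // `trigger` is a proto oneof: the value is decoded into a temporary optional so an
+        // absent field leaves the oneof untouched; a decoded value first reports any
+        // already-set case via handleConflictingOneOf(), then selects `.continuousCheckpointInterval`.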
+ try decoder.decodeSingularStringField(value: &v) + if let v = v { + if _storage._trigger != nil {try decoder.handleConflictingOneOf()} + _storage._trigger = .continuousCheckpointInterval(v) + } + }() + case 9: try { try decoder.decodeSingularStringField(value: &_storage._outputMode) }() + case 10: try { try decoder.decodeSingularStringField(value: &_storage._queryName) }() + case 11: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if _storage._sinkDestination != nil {try decoder.handleConflictingOneOf()} + _storage._sinkDestination = .path(v) + } + }() + case 12: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if _storage._sinkDestination != nil {try decoder.handleConflictingOneOf()} + _storage._sinkDestination = .tableName(v) + } + }() + case 13: try { try decoder.decodeSingularMessageField(value: &_storage._foreachWriter) }() + case 14: try { try decoder.decodeSingularMessageField(value: &_storage._foreachBatch) }() + case 15: try { try decoder.decodeRepeatedStringField(value: &_storage._clusteringColumnNames) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._format.isEmpty { + try visitor.visitSingularStringField(value: _storage._format, fieldNumber: 2) + } + if !_storage._options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: _storage._options, fieldNumber: 3) + } + if !_storage._partitioningColumnNames.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._partitioningColumnNames, fieldNumber: 4) + } + switch _storage._trigger { + case .processingTimeInterval?: try { + guard case .processingTimeInterval(let v)? = _storage._trigger else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 5) + }() + case .availableNow?: try { + guard case .availableNow(let v)? = _storage._trigger else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 6) + }() + case .once?: try { + guard case .once(let v)? = _storage._trigger else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 7) + }() + case .continuousCheckpointInterval?: try { + guard case .continuousCheckpointInterval(let v)? = _storage._trigger else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 8) + }() + case nil: break + } + if !_storage._outputMode.isEmpty { + try visitor.visitSingularStringField(value: _storage._outputMode, fieldNumber: 9) + } + if !_storage._queryName.isEmpty { + try visitor.visitSingularStringField(value: _storage._queryName, fieldNumber: 10) + } + switch _storage._sinkDestination { + case .path?: try { + guard case .path(let v)? = _storage._sinkDestination else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 11) + }() + case .tableName?: try { + guard case .tableName(let v)? 
= _storage._sinkDestination else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 12) + }() + case nil: break + } + try { if let v = _storage._foreachWriter { + try visitor.visitSingularMessageField(value: v, fieldNumber: 13) + } }() + try { if let v = _storage._foreachBatch { + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + } }() + if !_storage._clusteringColumnNames.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._clusteringColumnNames, fieldNumber: 15) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WriteStreamOperationStart, rhs: Spark_Connect_WriteStreamOperationStart) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._format != rhs_storage._format {return false} + if _storage._options != rhs_storage._options {return false} + if _storage._partitioningColumnNames != rhs_storage._partitioningColumnNames {return false} + if _storage._trigger != rhs_storage._trigger {return false} + if _storage._outputMode != rhs_storage._outputMode {return false} + if _storage._queryName != rhs_storage._queryName {return false} + if _storage._sinkDestination != rhs_storage._sinkDestination {return false} + if _storage._foreachWriter != rhs_storage._foreachWriter {return false} + if _storage._foreachBatch != rhs_storage._foreachBatch {return false} + if _storage._clusteringColumnNames != rhs_storage._clusteringColumnNames {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingForeachFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingForeachFunction" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "python_function"), + 2: .standard(proto: "scala_function"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_PythonUDF? + var hadOneofValue = false + if let current = self.function { + hadOneofValue = true + if case .pythonFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.function = .pythonFunction(v) + } + }() + case 2: try { + var v: Spark_Connect_ScalarScalaUDF? 
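+        // Message-typed oneof member: the temporary is seeded with the current `.scalaFunction`
+        // value (if any) so a repeated occurrence of field 2 merges into the existing message,
+        // while any other populated case is reported via handleConflictingOneOf() before
+        // `function` is reassigned.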
+ var hadOneofValue = false + if let current = self.function { + hadOneofValue = true + if case .scalaFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.function = .scalaFunction(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.function { + case .pythonFunction?: try { + guard case .pythonFunction(let v)? = self.function else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .scalaFunction?: try { + guard case .scalaFunction(let v)? = self.function else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingForeachFunction, rhs: Spark_Connect_StreamingForeachFunction) -> Bool { + if lhs.function != rhs.function {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WriteStreamOperationStartResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WriteStreamOperationStartResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "query_id"), + 2: .same(proto: "name"), + 3: .standard(proto: "query_started_event_json"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._queryID) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._queryStartedEventJson) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._queryID { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 2) + } + try { if let v = self._queryStartedEventJson { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WriteStreamOperationStartResult, rhs: Spark_Connect_WriteStreamOperationStartResult) -> Bool { + if lhs._queryID != rhs._queryID {return false} + if lhs.name != rhs.name {return false} + if lhs._queryStartedEventJson != rhs._queryStartedEventJson {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryInstanceId: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryInstanceId" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + 2: .standard(proto: "run_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.id) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.runID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.id.isEmpty { + try visitor.visitSingularStringField(value: self.id, fieldNumber: 1) + } + if !self.runID.isEmpty { + try visitor.visitSingularStringField(value: self.runID, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryInstanceId, rhs: Spark_Connect_StreamingQueryInstanceId) -> Bool { + if lhs.id != rhs.id {return false} + if lhs.runID != rhs.runID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "query_id"), + 2: .same(proto: "status"), + 3: .standard(proto: "last_progress"), + 4: .standard(proto: "recent_progress"), + 5: .same(proto: "stop"), + 6: .standard(proto: "process_all_available"), + 7: .same(proto: "explain"), + 8: .same(proto: "exception"), + 9: .standard(proto: "await_termination"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._queryID) }() + case 2: try { + var v: Bool? 
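+        // `status` is one of the Bool selectors in the `command` oneof; the same
+        // decode-into-optional pattern is repeated below for fields 3-6 and 8.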
+ try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .status(v) + } + }() + case 3: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .lastProgress(v) + } + }() + case 4: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .recentProgress(v) + } + }() + case 5: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .stop(v) + } + }() + case 6: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .processAllAvailable(v) + } + }() + case 7: try { + var v: Spark_Connect_StreamingQueryCommand.ExplainCommand? + var hadOneofValue = false + if let current = self.command { + hadOneofValue = true + if case .explain(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.command = .explain(v) + } + }() + case 8: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .exception(v) + } + }() + case 9: try { + var v: Spark_Connect_StreamingQueryCommand.AwaitTerminationCommand? + var hadOneofValue = false + if let current = self.command { + hadOneofValue = true + if case .awaitTermination(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.command = .awaitTermination(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._queryID { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + switch self.command { + case .status?: try { + guard case .status(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 2) + }() + case .lastProgress?: try { + guard case .lastProgress(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 3) + }() + case .recentProgress?: try { + guard case .recentProgress(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 4) + }() + case .stop?: try { + guard case .stop(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 5) + }() + case .processAllAvailable?: try { + guard case .processAllAvailable(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 6) + }() + case .explain?: try { + guard case .explain(let v)? 
= self.command else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case .exception?: try { + guard case .exception(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 8) + }() + case .awaitTermination?: try { + guard case .awaitTermination(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommand, rhs: Spark_Connect_StreamingQueryCommand) -> Bool { + if lhs._queryID != rhs._queryID {return false} + if lhs.command != rhs.command {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommand.ExplainCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryCommand.protoMessageName + ".ExplainCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "extended"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.extended) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.extended != false { + try visitor.visitSingularBoolField(value: self.extended, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommand.ExplainCommand, rhs: Spark_Connect_StreamingQueryCommand.ExplainCommand) -> Bool { + if lhs.extended != rhs.extended {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommand.AwaitTerminationCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryCommand.protoMessageName + ".AwaitTerminationCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 2: .standard(proto: "timeout_ms"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 2: try { try decoder.decodeSingularInt64Field(value: &self._timeoutMs) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._timeoutMs { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommand.AwaitTerminationCommand, rhs: Spark_Connect_StreamingQueryCommand.AwaitTerminationCommand) -> Bool { + if lhs._timeoutMs != rhs._timeoutMs {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommandResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryCommandResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "query_id"), + 2: .same(proto: "status"), + 3: .standard(proto: "recent_progress"), + 4: .same(proto: "explain"), + 5: .same(proto: "exception"), + 6: .standard(proto: "await_termination"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._queryID) }() + case 2: try { + var v: Spark_Connect_StreamingQueryCommandResult.StatusResult? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .status(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .status(v) + } + }() + case 3: try { + var v: Spark_Connect_StreamingQueryCommandResult.RecentProgressResult? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .recentProgress(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .recentProgress(v) + } + }() + case 4: try { + var v: Spark_Connect_StreamingQueryCommandResult.ExplainResult? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .explain(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .explain(v) + } + }() + case 5: try { + var v: Spark_Connect_StreamingQueryCommandResult.ExceptionResult? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .exception(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .exception(v) + } + }() + case 6: try { + var v: Spark_Connect_StreamingQueryCommandResult.AwaitTerminationResult? 
+ var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .awaitTermination(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .awaitTermination(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._queryID { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + switch self.resultType { + case .status?: try { + guard case .status(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .recentProgress?: try { + guard case .recentProgress(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .explain?: try { + guard case .explain(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .exception?: try { + guard case .exception(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .awaitTermination?: try { + guard case .awaitTermination(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommandResult, rhs: Spark_Connect_StreamingQueryCommandResult) -> Bool { + if lhs._queryID != rhs._queryID {return false} + if lhs.resultType != rhs.resultType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommandResult.StatusResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryCommandResult.protoMessageName + ".StatusResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "status_message"), + 2: .standard(proto: "is_data_available"), + 3: .standard(proto: "is_trigger_active"), + 4: .standard(proto: "is_active"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.statusMessage) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self.isDataAvailable) }() + case 3: try { try decoder.decodeSingularBoolField(value: &self.isTriggerActive) }() + case 4: try { try decoder.decodeSingularBoolField(value: &self.isActive) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.statusMessage.isEmpty { + try visitor.visitSingularStringField(value: self.statusMessage, fieldNumber: 1) + } + if self.isDataAvailable != false { + try visitor.visitSingularBoolField(value: self.isDataAvailable, fieldNumber: 2) + } + if self.isTriggerActive != false { + try visitor.visitSingularBoolField(value: self.isTriggerActive, fieldNumber: 3) + } + if self.isActive != false { + try visitor.visitSingularBoolField(value: self.isActive, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommandResult.StatusResult, rhs: Spark_Connect_StreamingQueryCommandResult.StatusResult) -> Bool { + if lhs.statusMessage != rhs.statusMessage {return false} + if lhs.isDataAvailable != rhs.isDataAvailable {return false} + if lhs.isTriggerActive != rhs.isTriggerActive {return false} + if lhs.isActive != rhs.isActive {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommandResult.RecentProgressResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryCommandResult.protoMessageName + ".RecentProgressResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 5: .standard(proto: "recent_progress_json"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 5: try { try decoder.decodeRepeatedStringField(value: &self.recentProgressJson) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.recentProgressJson.isEmpty { + try visitor.visitRepeatedStringField(value: self.recentProgressJson, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommandResult.RecentProgressResult, rhs: Spark_Connect_StreamingQueryCommandResult.RecentProgressResult) -> Bool { + if lhs.recentProgressJson != rhs.recentProgressJson {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommandResult.ExplainResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryCommandResult.protoMessageName + ".ExplainResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "result"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.result) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.result.isEmpty { + try visitor.visitSingularStringField(value: self.result, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommandResult.ExplainResult, rhs: Spark_Connect_StreamingQueryCommandResult.ExplainResult) -> Bool { + if lhs.result != rhs.result {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommandResult.ExceptionResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryCommandResult.protoMessageName + ".ExceptionResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "exception_message"), + 2: .standard(proto: "error_class"), + 3: .standard(proto: "stack_trace"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._exceptionMessage) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._errorClass) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._stackTrace) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._exceptionMessage { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._errorClass { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try { if let v = self._stackTrace { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommandResult.ExceptionResult, rhs: Spark_Connect_StreamingQueryCommandResult.ExceptionResult) -> Bool { + if lhs._exceptionMessage != rhs._exceptionMessage {return false} + if lhs._errorClass != rhs._errorClass {return false} + if lhs._stackTrace != rhs._stackTrace {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryCommandResult.AwaitTerminationResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryCommandResult.protoMessageName + ".AwaitTerminationResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "terminated"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.terminated) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.terminated != false { + try visitor.visitSingularBoolField(value: self.terminated, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryCommandResult.AwaitTerminationResult, rhs: Spark_Connect_StreamingQueryCommandResult.AwaitTerminationResult) -> Bool { + if lhs.terminated != rhs.terminated {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryManagerCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "active"), + 2: .standard(proto: "get_query"), + 3: .standard(proto: "await_any_termination"), + 4: .standard(proto: "reset_terminated"), + 5: .standard(proto: "add_listener"), + 6: .standard(proto: "remove_listener"), + 7: .standard(proto: "list_listeners"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .active(v) + } + }() + case 2: try { + var v: String? 
+ try decoder.decodeSingularStringField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .getQuery(v) + } + }() + case 3: try { + var v: Spark_Connect_StreamingQueryManagerCommand.AwaitAnyTerminationCommand? + var hadOneofValue = false + if let current = self.command { + hadOneofValue = true + if case .awaitAnyTermination(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.command = .awaitAnyTermination(v) + } + }() + case 4: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .resetTerminated(v) + } + }() + case 5: try { + var v: Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand? + var hadOneofValue = false + if let current = self.command { + hadOneofValue = true + if case .addListener(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.command = .addListener(v) + } + }() + case 6: try { + var v: Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand? + var hadOneofValue = false + if let current = self.command { + hadOneofValue = true + if case .removeListener(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.command = .removeListener(v) + } + }() + case 7: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .listListeners(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.command { + case .active?: try { + guard case .active(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 1) + }() + case .getQuery?: try { + guard case .getQuery(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + }() + case .awaitAnyTermination?: try { + guard case .awaitAnyTermination(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .resetTerminated?: try { + guard case .resetTerminated(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 4) + }() + case .addListener?: try { + guard case .addListener(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .removeListener?: try { + guard case .removeListener(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .listListeners?: try { + guard case .listListeners(let v)? 
= self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 7) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommand, rhs: Spark_Connect_StreamingQueryManagerCommand) -> Bool { + if lhs.command != rhs.command {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommand.AwaitAnyTerminationCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryManagerCommand.protoMessageName + ".AwaitAnyTerminationCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "timeout_ms"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt64Field(value: &self._timeoutMs) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._timeoutMs { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommand.AwaitAnyTerminationCommand, rhs: Spark_Connect_StreamingQueryManagerCommand.AwaitAnyTerminationCommand) -> Bool { + if lhs._timeoutMs != rhs._timeoutMs {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryManagerCommand.protoMessageName + ".StreamingQueryListenerCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "listener_payload"), + 2: .standard(proto: "python_listener_payload"), + 3: .same(proto: "id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBytesField(value: &self.listenerPayload) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._pythonListenerPayload) }() + case 3: try { try decoder.decodeSingularStringField(value: &self.id) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.listenerPayload.isEmpty { + try visitor.visitSingularBytesField(value: self.listenerPayload, fieldNumber: 1) + } + try { if let v = self._pythonListenerPayload { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if !self.id.isEmpty { + try visitor.visitSingularStringField(value: self.id, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand, rhs: Spark_Connect_StreamingQueryManagerCommand.StreamingQueryListenerCommand) -> Bool { + if lhs.listenerPayload != rhs.listenerPayload {return false} + if lhs._pythonListenerPayload != rhs._pythonListenerPayload {return false} + if lhs.id != rhs.id {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommandResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryManagerCommandResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "active"), + 2: .same(proto: "query"), + 3: .standard(proto: "await_any_termination"), + 4: .standard(proto: "reset_terminated"), + 5: .standard(proto: "add_listener"), + 6: .standard(proto: "remove_listener"), + 7: .standard(proto: "list_listeners"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_StreamingQueryManagerCommandResult.ActiveResult? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .active(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .active(v) + } + }() + case 2: try { + var v: Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .query(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .query(v) + } + }() + case 3: try { + var v: Spark_Connect_StreamingQueryManagerCommandResult.AwaitAnyTerminationResult? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .awaitAnyTermination(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .awaitAnyTermination(v) + } + }() + case 4: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.resultType != nil {try decoder.handleConflictingOneOf()} + self.resultType = .resetTerminated(v) + } + }() + case 5: try { + var v: Bool? 
+ try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.resultType != nil {try decoder.handleConflictingOneOf()} + self.resultType = .addListener(v) + } + }() + case 6: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.resultType != nil {try decoder.handleConflictingOneOf()} + self.resultType = .removeListener(v) + } + }() + case 7: try { + var v: Spark_Connect_StreamingQueryManagerCommandResult.ListStreamingQueryListenerResult? + var hadOneofValue = false + if let current = self.resultType { + hadOneofValue = true + if case .listListeners(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.resultType = .listListeners(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.resultType { + case .active?: try { + guard case .active(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .query?: try { + guard case .query(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .awaitAnyTermination?: try { + guard case .awaitAnyTermination(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .resetTerminated?: try { + guard case .resetTerminated(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 4) + }() + case .addListener?: try { + guard case .addListener(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 5) + }() + case .removeListener?: try { + guard case .removeListener(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 6) + }() + case .listListeners?: try { + guard case .listListeners(let v)? = self.resultType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommandResult, rhs: Spark_Connect_StreamingQueryManagerCommandResult) -> Bool { + if lhs.resultType != rhs.resultType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommandResult.ActiveResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryManagerCommandResult.protoMessageName + ".ActiveResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "active_queries"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.activeQueries) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.activeQueries.isEmpty { + try visitor.visitRepeatedMessageField(value: self.activeQueries, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommandResult.ActiveResult, rhs: Spark_Connect_StreamingQueryManagerCommandResult.ActiveResult) -> Bool { + if lhs.activeQueries != rhs.activeQueries {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryManagerCommandResult.protoMessageName + ".StreamingQueryInstance" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "id"), + 2: .same(proto: "name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._id) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._name) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._id { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._name { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance, rhs: Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryInstance) -> Bool { + if lhs._id != rhs._id {return false} + if lhs._name != rhs._name {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommandResult.AwaitAnyTerminationResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryManagerCommandResult.protoMessageName + ".AwaitAnyTerminationResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "terminated"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.terminated) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.terminated != false { + try visitor.visitSingularBoolField(value: self.terminated, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommandResult.AwaitAnyTerminationResult, rhs: Spark_Connect_StreamingQueryManagerCommandResult.AwaitAnyTerminationResult) -> Bool { + if lhs.terminated != rhs.terminated {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryListenerInstance: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryManagerCommandResult.protoMessageName + ".StreamingQueryListenerInstance" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "listener_payload"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBytesField(value: &self.listenerPayload) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.listenerPayload.isEmpty { + try visitor.visitSingularBytesField(value: self.listenerPayload, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryListenerInstance, rhs: Spark_Connect_StreamingQueryManagerCommandResult.StreamingQueryListenerInstance) -> Bool { + if lhs.listenerPayload != rhs.listenerPayload {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryManagerCommandResult.ListStreamingQueryListenerResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_StreamingQueryManagerCommandResult.protoMessageName + ".ListStreamingQueryListenerResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "listener_ids"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.listenerIds) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.listenerIds.isEmpty { + try visitor.visitRepeatedStringField(value: self.listenerIds, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryManagerCommandResult.ListStreamingQueryListenerResult, rhs: Spark_Connect_StreamingQueryManagerCommandResult.ListStreamingQueryListenerResult) -> Bool { + if lhs.listenerIds != rhs.listenerIds {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryListenerBusCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryListenerBusCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "add_listener_bus_listener"), + 2: .standard(proto: "remove_listener_bus_listener"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .addListenerBusListener(v) + } + }() + case 2: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.command != nil {try decoder.handleConflictingOneOf()} + self.command = .removeListenerBusListener(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.command { + case .addListenerBusListener?: try { + guard case .addListenerBusListener(let v)? = self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 1) + }() + case .removeListenerBusListener?: try { + guard case .removeListenerBusListener(let v)? 
= self.command else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 2) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryListenerBusCommand, rhs: Spark_Connect_StreamingQueryListenerBusCommand) -> Bool { + if lhs.command != rhs.command {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryListenerEvent: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryListenerEvent" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "event_json"), + 2: .standard(proto: "event_type"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.eventJson) }() + case 2: try { try decoder.decodeSingularEnumField(value: &self.eventType) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.eventJson.isEmpty { + try visitor.visitSingularStringField(value: self.eventJson, fieldNumber: 1) + } + if self.eventType != .queryProgressUnspecified { + try visitor.visitSingularEnumField(value: self.eventType, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryListenerEvent, rhs: Spark_Connect_StreamingQueryListenerEvent) -> Bool { + if lhs.eventJson != rhs.eventJson {return false} + if lhs.eventType != rhs.eventType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StreamingQueryListenerEventsResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StreamingQueryListenerEventsResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "events"), + 2: .standard(proto: "listener_bus_listener_added"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.events) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self._listenerBusListenerAdded) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.events.isEmpty { + try visitor.visitRepeatedMessageField(value: self.events, fieldNumber: 1) + } + try { if let v = self._listenerBusListenerAdded { + try visitor.visitSingularBoolField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StreamingQueryListenerEventsResult, rhs: Spark_Connect_StreamingQueryListenerEventsResult) -> Bool { + if lhs.events != rhs.events {return false} + if lhs._listenerBusListenerAdded != rhs._listenerBusListenerAdded {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_GetResourcesCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".GetResourcesCommand" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_GetResourcesCommand, rhs: Spark_Connect_GetResourcesCommand) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_GetResourcesCommandResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".GetResourcesCommandResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "resources"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: &self.resources) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.resources.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: self.resources, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_GetResourcesCommandResult, rhs: Spark_Connect_GetResourcesCommandResult) -> Bool { + if lhs.resources != rhs.resources {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CreateResourceProfileCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CreateResourceProfileCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "profile"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._profile) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._profile { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CreateResourceProfileCommand, rhs: Spark_Connect_CreateResourceProfileCommand) -> Bool { + if lhs._profile != rhs._profile {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CreateResourceProfileCommandResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CreateResourceProfileCommandResult" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "profile_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.profileID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.profileID != 0 { + try visitor.visitSingularInt32Field(value: self.profileID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CreateResourceProfileCommandResult, rhs: Spark_Connect_CreateResourceProfileCommandResult) -> Bool { + if lhs.profileID != rhs.profileID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_RemoveCachedRemoteRelationCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".RemoveCachedRemoteRelationCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "relation"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._relation) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._relation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_RemoveCachedRemoteRelationCommand, rhs: Spark_Connect_RemoveCachedRemoteRelationCommand) -> Bool { + if lhs._relation != rhs._relation {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CheckpointCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CheckpointCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "relation"), + 2: .same(proto: "local"), + 3: .same(proto: "eager"), + 4: .standard(proto: "storage_level"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._relation) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self.local) }() + case 3: try { try decoder.decodeSingularBoolField(value: &self.eager) }() + case 4: try { try decoder.decodeSingularMessageField(value: &self._storageLevel) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._relation { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if self.local != false { + try visitor.visitSingularBoolField(value: self.local, fieldNumber: 2) + } + if self.eager != false { + try visitor.visitSingularBoolField(value: self.eager, fieldNumber: 3) + } + try { if let v = self._storageLevel { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CheckpointCommand, rhs: Spark_Connect_CheckpointCommand) -> Bool { + if lhs._relation != rhs._relation {return false} + if lhs.local != rhs.local {return false} + if lhs.eager != rhs.eager {return false} + if lhs._storageLevel != rhs._storageLevel {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_MergeIntoTableCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".MergeIntoTableCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "target_table_name"), + 2: .standard(proto: "source_table_plan"), + 3: .standard(proto: "merge_condition"), + 4: .standard(proto: "match_actions"), + 5: .standard(proto: "not_matched_actions"), + 6: .standard(proto: "not_matched_by_source_actions"), + 7: .standard(proto: "with_schema_evolution"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.targetTableName) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._sourceTablePlan) }() + case 3: try { try decoder.decodeSingularMessageField(value: &self._mergeCondition) }() + case 4: try { try decoder.decodeRepeatedMessageField(value: &self.matchActions) }() + case 5: try { try decoder.decodeRepeatedMessageField(value: &self.notMatchedActions) }() + case 6: try { try decoder.decodeRepeatedMessageField(value: &self.notMatchedBySourceActions) }() + case 7: try { try decoder.decodeSingularBoolField(value: &self.withSchemaEvolution) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.targetTableName.isEmpty { + try visitor.visitSingularStringField(value: self.targetTableName, fieldNumber: 1) + } + try { if let v = self._sourceTablePlan { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = self._mergeCondition { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + if !self.matchActions.isEmpty { + try visitor.visitRepeatedMessageField(value: self.matchActions, fieldNumber: 4) + } + if !self.notMatchedActions.isEmpty { + try visitor.visitRepeatedMessageField(value: self.notMatchedActions, fieldNumber: 5) + } + if !self.notMatchedBySourceActions.isEmpty { + try visitor.visitRepeatedMessageField(value: self.notMatchedBySourceActions, fieldNumber: 6) + } + if self.withSchemaEvolution != false { + try visitor.visitSingularBoolField(value: self.withSchemaEvolution, fieldNumber: 7) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_MergeIntoTableCommand, rhs: Spark_Connect_MergeIntoTableCommand) -> Bool { + if lhs.targetTableName != rhs.targetTableName {return false} + if lhs._sourceTablePlan != rhs._sourceTablePlan {return false} + if lhs._mergeCondition != rhs._mergeCondition {return false} + if lhs.matchActions != rhs.matchActions {return false} + if lhs.notMatchedActions != rhs.notMatchedActions {return false} + if lhs.notMatchedBySourceActions != rhs.notMatchedBySourceActions {return false} + if lhs.withSchemaEvolution != rhs.withSchemaEvolution {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecuteExternalCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExecuteExternalCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "runner"), + 2: .same(proto: "command"), + 3: .same(proto: "options"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.runner) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.command) }() + case 3: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.options) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.runner.isEmpty { + try visitor.visitSingularStringField(value: self.runner, fieldNumber: 1) + } + if !self.command.isEmpty { + try visitor.visitSingularStringField(value: self.command, fieldNumber: 2) + } + if !self.options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.options, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecuteExternalCommand, rhs: Spark_Connect_ExecuteExternalCommand) -> Bool { + if lhs.runner != rhs.runner {return false} + if lhs.command != rhs.command {return false} + if lhs.options != rhs.options {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Sources/SparkConnect/common.grpc.swift b/Sources/SparkConnect/common.grpc.swift new file mode 100644 index 0000000..27958c5 --- /dev/null +++ b/Sources/SparkConnect/common.grpc.swift @@ -0,0 +1,26 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/common.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/common.pb.swift b/Sources/SparkConnect/common.pb.swift new file mode 100644 index 0000000..6de6d35 --- /dev/null +++ b/Sources/SparkConnect/common.pb.swift @@ -0,0 +1,1115 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/common.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +/// StorageLevel for persisting Datasets/Tables. +struct Spark_Connect_StorageLevel: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Whether the cache should use disk or not. + var useDisk: Bool = false + + /// (Required) Whether the cache should use memory or not. + var useMemory: Bool = false + + /// (Required) Whether the cache should use off-heap or not. + var useOffHeap: Bool = false + + /// (Required) Whether the cached data is deserialized or not. + var deserialized: Bool = false + + /// (Required) The number of replicas. + var replication: Int32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// ResourceInformation to hold information about a type of Resource. +/// The corresponding class is 'org.apache.spark.resource.ResourceInformation' +struct Spark_Connect_ResourceInformation: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The name of the resource + var name: String = String() + + /// (Required) An array of strings describing the addresses of the resource. + var addresses: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// An executor resource request. +struct Spark_Connect_ExecutorResourceRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) resource name. + var resourceName: String = String() + + /// (Required) resource amount requesting. + var amount: Int64 = 0 + + /// Optional script used to discover the resources. + var discoveryScript: String { + get {return _discoveryScript ?? String()} + set {_discoveryScript = newValue} + } + /// Returns true if `discoveryScript` has been explicitly set. + var hasDiscoveryScript: Bool {return self._discoveryScript != nil} + /// Clears the value of `discoveryScript`. Subsequent reads from it will return its default value. + mutating func clearDiscoveryScript() {self._discoveryScript = nil} + + /// Optional vendor, required for some cluster managers. + var vendor: String { + get {return _vendor ?? 
String()} + set {_vendor = newValue} + } + /// Returns true if `vendor` has been explicitly set. + var hasVendor: Bool {return self._vendor != nil} + /// Clears the value of `vendor`. Subsequent reads from it will return its default value. + mutating func clearVendor() {self._vendor = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _discoveryScript: String? = nil + fileprivate var _vendor: String? = nil +} + +/// A task resource request. +struct Spark_Connect_TaskResourceRequest: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) resource name. + var resourceName: String = String() + + /// (Required) resource amount requesting as a double to support fractional + /// resource requests. + var amount: Double = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_ResourceProfile: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) Resource requests for executors. Mapped from the resource name + /// (e.g., cores, memory, CPU) to its specific request. + var executorResources: Dictionary = [:] + + /// (Optional) Resource requests for tasks. Mapped from the resource name + /// (e.g., cores, memory, CPU) to its specific request. + var taskResources: Dictionary = [:] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_Origin: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Indicate the origin type. + var function: Spark_Connect_Origin.OneOf_Function? = nil + + var pythonOrigin: Spark_Connect_PythonOrigin { + get { + if case .pythonOrigin(let v)? = function {return v} + return Spark_Connect_PythonOrigin() + } + set {function = .pythonOrigin(newValue)} + } + + var jvmOrigin: Spark_Connect_JvmOrigin { + get { + if case .jvmOrigin(let v)? = function {return v} + return Spark_Connect_JvmOrigin() + } + set {function = .jvmOrigin(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// (Required) Indicate the origin type. + enum OneOf_Function: Equatable, Sendable { + case pythonOrigin(Spark_Connect_PythonOrigin) + case jvmOrigin(Spark_Connect_JvmOrigin) + + } + + init() {} +} + +struct Spark_Connect_PythonOrigin: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Name of the origin, for example, the name of the function + var fragment: String = String() + + /// (Required) Callsite to show to end users, for example, stacktrace. + var callSite: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_JvmOrigin: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) Line number in the source file. 
+ var line: Int32 { + get {return _line ?? 0} + set {_line = newValue} + } + /// Returns true if `line` has been explicitly set. + var hasLine: Bool {return self._line != nil} + /// Clears the value of `line`. Subsequent reads from it will return its default value. + mutating func clearLine() {self._line = nil} + + /// (Optional) Start position in the source file. + var startPosition: Int32 { + get {return _startPosition ?? 0} + set {_startPosition = newValue} + } + /// Returns true if `startPosition` has been explicitly set. + var hasStartPosition: Bool {return self._startPosition != nil} + /// Clears the value of `startPosition`. Subsequent reads from it will return its default value. + mutating func clearStartPosition() {self._startPosition = nil} + + /// (Optional) Start index in the source file. + var startIndex: Int32 { + get {return _startIndex ?? 0} + set {_startIndex = newValue} + } + /// Returns true if `startIndex` has been explicitly set. + var hasStartIndex: Bool {return self._startIndex != nil} + /// Clears the value of `startIndex`. Subsequent reads from it will return its default value. + mutating func clearStartIndex() {self._startIndex = nil} + + /// (Optional) Stop index in the source file. + var stopIndex: Int32 { + get {return _stopIndex ?? 0} + set {_stopIndex = newValue} + } + /// Returns true if `stopIndex` has been explicitly set. + var hasStopIndex: Bool {return self._stopIndex != nil} + /// Clears the value of `stopIndex`. Subsequent reads from it will return its default value. + mutating func clearStopIndex() {self._stopIndex = nil} + + /// (Optional) SQL text. + var sqlText: String { + get {return _sqlText ?? String()} + set {_sqlText = newValue} + } + /// Returns true if `sqlText` has been explicitly set. + var hasSqlText: Bool {return self._sqlText != nil} + /// Clears the value of `sqlText`. Subsequent reads from it will return its default value. + mutating func clearSqlText() {self._sqlText = nil} + + /// (Optional) Object type. + var objectType: String { + get {return _objectType ?? String()} + set {_objectType = newValue} + } + /// Returns true if `objectType` has been explicitly set. + var hasObjectType: Bool {return self._objectType != nil} + /// Clears the value of `objectType`. Subsequent reads from it will return its default value. + mutating func clearObjectType() {self._objectType = nil} + + /// (Optional) Object name. + var objectName: String { + get {return _objectName ?? String()} + set {_objectName = newValue} + } + /// Returns true if `objectName` has been explicitly set. + var hasObjectName: Bool {return self._objectName != nil} + /// Clears the value of `objectName`. Subsequent reads from it will return its default value. + mutating func clearObjectName() {self._objectName = nil} + + /// (Optional) Stack trace. + var stackTrace: [Spark_Connect_StackTraceElement] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _line: Int32? = nil + fileprivate var _startPosition: Int32? = nil + fileprivate var _startIndex: Int32? = nil + fileprivate var _stopIndex: Int32? = nil + fileprivate var _sqlText: String? = nil + fileprivate var _objectType: String? = nil + fileprivate var _objectName: String? = nil +} + +/// A message to hold a [[java.lang.StackTraceElement]]. +struct Spark_Connect_StackTraceElement: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
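+  // Illustrative note, not part of the generated output: optional fields in
+  // this struct use SwiftProtobuf's explicit-presence pattern. A minimal
+  // sketch with hypothetical values:
+  //   var element = Spark_Connect_StackTraceElement()
+  //   element.declaringClass = "MyClass"
+  //   element.methodName = "run"
+  //   element.lineNumber = 42
+  //   element.hasFileName          // false until `fileName` is assigned
+  //   element.fileName = "My.swift"
+  //   element.clearFileName()      // unsets it; `hasFileName` is false again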
+ + /// (Optional) Class loader name + var classLoaderName: String { + get {return _classLoaderName ?? String()} + set {_classLoaderName = newValue} + } + /// Returns true if `classLoaderName` has been explicitly set. + var hasClassLoaderName: Bool {return self._classLoaderName != nil} + /// Clears the value of `classLoaderName`. Subsequent reads from it will return its default value. + mutating func clearClassLoaderName() {self._classLoaderName = nil} + + /// (Optional) Module name + var moduleName: String { + get {return _moduleName ?? String()} + set {_moduleName = newValue} + } + /// Returns true if `moduleName` has been explicitly set. + var hasModuleName: Bool {return self._moduleName != nil} + /// Clears the value of `moduleName`. Subsequent reads from it will return its default value. + mutating func clearModuleName() {self._moduleName = nil} + + /// (Optional) Module version + var moduleVersion: String { + get {return _moduleVersion ?? String()} + set {_moduleVersion = newValue} + } + /// Returns true if `moduleVersion` has been explicitly set. + var hasModuleVersion: Bool {return self._moduleVersion != nil} + /// Clears the value of `moduleVersion`. Subsequent reads from it will return its default value. + mutating func clearModuleVersion() {self._moduleVersion = nil} + + /// (Required) Declaring class + var declaringClass: String = String() + + /// (Required) Method name + var methodName: String = String() + + /// (Optional) File name + var fileName: String { + get {return _fileName ?? String()} + set {_fileName = newValue} + } + /// Returns true if `fileName` has been explicitly set. + var hasFileName: Bool {return self._fileName != nil} + /// Clears the value of `fileName`. Subsequent reads from it will return its default value. + mutating func clearFileName() {self._fileName = nil} + + /// (Required) Line number + var lineNumber: Int32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _classLoaderName: String? = nil + fileprivate var _moduleName: String? = nil + fileprivate var _moduleVersion: String? = nil + fileprivate var _fileName: String? = nil +} + +struct Spark_Connect_Bools: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var values: [Bool] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_Ints: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var values: [Int32] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_Longs: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var values: [Int64] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_Floats: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
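+  // Illustrative note, not part of the generated output: the Bools, Ints,
+  // Longs, Floats, Doubles, and Strings messages each wrap a single repeated
+  // field. Assuming SwiftProtobuf's standard Message API, a round trip is:
+  //   var floats = Spark_Connect_Floats()
+  //   floats.values = [1.0, 2.5]
+  //   let data = try floats.serializedData()
+  //   let decoded = try Spark_Connect_Floats(serializedData: data)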
+ + var values: [Float] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_Doubles: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var values: [Double] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_Strings: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var values: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. + +fileprivate let _protobuf_package = "spark.connect" + +extension Spark_Connect_StorageLevel: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StorageLevel" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "use_disk"), + 2: .standard(proto: "use_memory"), + 3: .standard(proto: "use_off_heap"), + 4: .same(proto: "deserialized"), + 5: .same(proto: "replication"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.useDisk) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self.useMemory) }() + case 3: try { try decoder.decodeSingularBoolField(value: &self.useOffHeap) }() + case 4: try { try decoder.decodeSingularBoolField(value: &self.deserialized) }() + case 5: try { try decoder.decodeSingularInt32Field(value: &self.replication) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.useDisk != false { + try visitor.visitSingularBoolField(value: self.useDisk, fieldNumber: 1) + } + if self.useMemory != false { + try visitor.visitSingularBoolField(value: self.useMemory, fieldNumber: 2) + } + if self.useOffHeap != false { + try visitor.visitSingularBoolField(value: self.useOffHeap, fieldNumber: 3) + } + if self.deserialized != false { + try visitor.visitSingularBoolField(value: self.deserialized, fieldNumber: 4) + } + if self.replication != 0 { + try visitor.visitSingularInt32Field(value: self.replication, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StorageLevel, rhs: Spark_Connect_StorageLevel) -> Bool { + if lhs.useDisk != rhs.useDisk {return false} + if lhs.useMemory != rhs.useMemory {return false} + if lhs.useOffHeap != rhs.useOffHeap {return false} + if lhs.deserialized != rhs.deserialized {return false} + if lhs.replication != rhs.replication {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ResourceInformation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ResourceInformation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ 
+ 1: .same(proto: "name"), + 2: .same(proto: "addresses"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &self.addresses) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.name.isEmpty { + try visitor.visitSingularStringField(value: self.name, fieldNumber: 1) + } + if !self.addresses.isEmpty { + try visitor.visitRepeatedStringField(value: self.addresses, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ResourceInformation, rhs: Spark_Connect_ResourceInformation) -> Bool { + if lhs.name != rhs.name {return false} + if lhs.addresses != rhs.addresses {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExecutorResourceRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExecutorResourceRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "resource_name"), + 2: .same(proto: "amount"), + 3: .standard(proto: "discovery_script"), + 4: .same(proto: "vendor"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.resourceName) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.amount) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._discoveryScript) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._vendor) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.resourceName.isEmpty { + try visitor.visitSingularStringField(value: self.resourceName, fieldNumber: 1) + } + if self.amount != 0 { + try visitor.visitSingularInt64Field(value: self.amount, fieldNumber: 2) + } + try { if let v = self._discoveryScript { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + try { if let v = self._vendor { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExecutorResourceRequest, rhs: Spark_Connect_ExecutorResourceRequest) -> Bool { + if lhs.resourceName != rhs.resourceName {return false} + if lhs.amount != rhs.amount {return false} + if lhs._discoveryScript != rhs._discoveryScript {return false} + if lhs._vendor != rhs._vendor {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_TaskResourceRequest: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".TaskResourceRequest" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "resource_name"), + 2: .same(proto: "amount"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.resourceName) }() + case 2: try { try decoder.decodeSingularDoubleField(value: &self.amount) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.resourceName.isEmpty { + try visitor.visitSingularStringField(value: self.resourceName, fieldNumber: 1) + } + if self.amount.bitPattern != 0 { + try visitor.visitSingularDoubleField(value: self.amount, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_TaskResourceRequest, rhs: Spark_Connect_TaskResourceRequest) -> Bool { + if lhs.resourceName != rhs.resourceName {return false} + if lhs.amount != rhs.amount {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ResourceProfile: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ResourceProfile" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "executor_resources"), + 2: .standard(proto: "task_resources"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: &self.executorResources) }() + case 2: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: &self.taskResources) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.executorResources.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: self.executorResources, fieldNumber: 1) + } + if !self.taskResources.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: self.taskResources, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ResourceProfile, rhs: Spark_Connect_ResourceProfile) -> Bool { + if lhs.executorResources != rhs.executorResources {return false} + if lhs.taskResources != rhs.taskResources {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Origin: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Origin" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "python_origin"), + 2: .standard(proto: "jvm_origin"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_PythonOrigin? + var hadOneofValue = false + if let current = self.function { + hadOneofValue = true + if case .pythonOrigin(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.function = .pythonOrigin(v) + } + }() + case 2: try { + var v: Spark_Connect_JvmOrigin? + var hadOneofValue = false + if let current = self.function { + hadOneofValue = true + if case .jvmOrigin(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.function = .jvmOrigin(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.function { + case .pythonOrigin?: try { + guard case .pythonOrigin(let v)? = self.function else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .jvmOrigin?: try { + guard case .jvmOrigin(let v)? 
= self.function else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Origin, rhs: Spark_Connect_Origin) -> Bool { + if lhs.function != rhs.function {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_PythonOrigin: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".PythonOrigin" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "fragment"), + 2: .standard(proto: "call_site"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.fragment) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.callSite) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.fragment.isEmpty { + try visitor.visitSingularStringField(value: self.fragment, fieldNumber: 1) + } + if !self.callSite.isEmpty { + try visitor.visitSingularStringField(value: self.callSite, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_PythonOrigin, rhs: Spark_Connect_PythonOrigin) -> Bool { + if lhs.fragment != rhs.fragment {return false} + if lhs.callSite != rhs.callSite {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_JvmOrigin: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".JvmOrigin" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "line"), + 2: .standard(proto: "start_position"), + 3: .standard(proto: "start_index"), + 4: .standard(proto: "stop_index"), + 5: .standard(proto: "sql_text"), + 6: .standard(proto: "object_type"), + 7: .standard(proto: "object_name"), + 8: .standard(proto: "stack_trace"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self._line) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self._startPosition) }() + case 3: try { try decoder.decodeSingularInt32Field(value: &self._startIndex) }() + case 4: try { try decoder.decodeSingularInt32Field(value: &self._stopIndex) }() + case 5: try { try decoder.decodeSingularStringField(value: &self._sqlText) }() + case 6: try { try decoder.decodeSingularStringField(value: &self._objectType) }() + case 7: try { try decoder.decodeSingularStringField(value: &self._objectName) }() + case 8: try { try decoder.decodeRepeatedMessageField(value: &self.stackTrace) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._line { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 1) + } }() + try { if let v = self._startPosition { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 2) + } }() + try { if let v = self._startIndex { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 3) + } }() + try { if let v = self._stopIndex { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 4) + } }() + try { if let v = self._sqlText { + try visitor.visitSingularStringField(value: v, fieldNumber: 5) + } }() + try { if let v = self._objectType { + try visitor.visitSingularStringField(value: v, fieldNumber: 6) + } }() + try { if let v = self._objectName { + try visitor.visitSingularStringField(value: v, fieldNumber: 7) + } }() + if !self.stackTrace.isEmpty { + try visitor.visitRepeatedMessageField(value: self.stackTrace, fieldNumber: 8) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_JvmOrigin, rhs: Spark_Connect_JvmOrigin) -> Bool { + if lhs._line != rhs._line {return false} + if lhs._startPosition != rhs._startPosition {return false} + if lhs._startIndex != rhs._startIndex {return false} + if lhs._stopIndex != rhs._stopIndex {return false} + if lhs._sqlText != rhs._sqlText {return false} + if lhs._objectType != rhs._objectType {return false} + if lhs._objectName != rhs._objectName {return false} + if lhs.stackTrace != rhs.stackTrace {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StackTraceElement: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StackTraceElement" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "class_loader_name"), + 2: .standard(proto: "module_name"), + 3: .standard(proto: "module_version"), + 4: .standard(proto: "declaring_class"), + 5: .standard(proto: "method_name"), + 6: .standard(proto: "file_name"), + 7: .standard(proto: "line_number"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._classLoaderName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._moduleName) }() + case 3: try { try decoder.decodeSingularStringField(value: &self._moduleVersion) }() + case 4: try { try decoder.decodeSingularStringField(value: &self.declaringClass) }() + case 5: try { try decoder.decodeSingularStringField(value: &self.methodName) }() + case 6: try { try decoder.decodeSingularStringField(value: &self._fileName) }() + case 7: try { try decoder.decodeSingularInt32Field(value: &self.lineNumber) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._classLoaderName { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._moduleName { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try { if let v = self._moduleVersion { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + if !self.declaringClass.isEmpty { + try visitor.visitSingularStringField(value: self.declaringClass, fieldNumber: 4) + } + if !self.methodName.isEmpty { + try visitor.visitSingularStringField(value: self.methodName, fieldNumber: 5) + } + try { if let v = self._fileName { + try visitor.visitSingularStringField(value: v, fieldNumber: 6) + } }() + if self.lineNumber != 0 { + try visitor.visitSingularInt32Field(value: self.lineNumber, fieldNumber: 7) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StackTraceElement, rhs: Spark_Connect_StackTraceElement) -> Bool { + if lhs._classLoaderName != rhs._classLoaderName {return false} + if lhs._moduleName != rhs._moduleName {return false} + if lhs._moduleVersion != rhs._moduleVersion {return false} + if lhs.declaringClass != rhs.declaringClass {return false} + if lhs.methodName != rhs.methodName {return false} + if lhs._fileName != rhs._fileName {return false} + if lhs.lineNumber != rhs.lineNumber {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Bools: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Bools" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedBoolField(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.values.isEmpty { + try visitor.visitPackedBoolField(value: self.values, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Bools, rhs: Spark_Connect_Bools) -> Bool { + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Ints: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Ints" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedInt32Field(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.values.isEmpty { + try visitor.visitPackedInt32Field(value: self.values, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Ints, rhs: Spark_Connect_Ints) -> Bool { + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Longs: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Longs" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedInt64Field(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.values.isEmpty { + try visitor.visitPackedInt64Field(value: self.values, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Longs, rhs: Spark_Connect_Longs) -> Bool { + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Floats: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Floats" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedFloatField(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.values.isEmpty { + try visitor.visitPackedFloatField(value: self.values, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Floats, rhs: Spark_Connect_Floats) -> Bool { + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Doubles: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Doubles" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedDoubleField(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.values.isEmpty { + try visitor.visitPackedDoubleField(value: self.values, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Doubles, rhs: Spark_Connect_Doubles) -> Bool { + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Strings: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Strings" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.values.isEmpty { + try visitor.visitRepeatedStringField(value: self.values, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Strings, rhs: Spark_Connect_Strings) -> Bool { + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Sources/SparkConnect/example_plugins.grpc.swift b/Sources/SparkConnect/example_plugins.grpc.swift new file mode 100644 index 0000000..ba5648a --- /dev/null +++ b/Sources/SparkConnect/example_plugins.grpc.swift @@ -0,0 +1,26 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. 
+// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/example_plugins.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/example_plugins.pb.swift b/Sources/SparkConnect/example_plugins.pb.swift new file mode 100644 index 0000000..734efc9 --- /dev/null +++ b/Sources/SparkConnect/example_plugins.pb.swift @@ -0,0 +1,215 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/example_plugins.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +struct Spark_Connect_ExamplePluginRelation: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var input: Spark_Connect_Relation { + get {return _input ?? Spark_Connect_Relation()} + set {_input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return self._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. 
+ mutating func clearInput() {self._input = nil} + + var customField: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _input: Spark_Connect_Relation? = nil +} + +struct Spark_Connect_ExamplePluginExpression: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var child: Spark_Connect_Expression { + get {return _child ?? Spark_Connect_Expression()} + set {_child = newValue} + } + /// Returns true if `child` has been explicitly set. + var hasChild: Bool {return self._child != nil} + /// Clears the value of `child`. Subsequent reads from it will return its default value. + mutating func clearChild() {self._child = nil} + + var customField: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _child: Spark_Connect_Expression? = nil +} + +struct Spark_Connect_ExamplePluginCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var customField: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. + +fileprivate let _protobuf_package = "spark.connect" + +extension Spark_Connect_ExamplePluginRelation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExamplePluginRelation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "custom_field"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.customField) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.customField.isEmpty { + try visitor.visitSingularStringField(value: self.customField, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExamplePluginRelation, rhs: Spark_Connect_ExamplePluginRelation) -> Bool { + if lhs._input != rhs._input {return false} + if lhs.customField != rhs.customField {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExamplePluginExpression: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExamplePluginExpression" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "child"), + 2: .standard(proto: "custom_field"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._child) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.customField) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._child { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.customField.isEmpty { + try visitor.visitSingularStringField(value: self.customField, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExamplePluginExpression, rhs: Spark_Connect_ExamplePluginExpression) -> Bool { + if lhs._child != rhs._child {return false} + if lhs.customField != rhs.customField {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExamplePluginCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExamplePluginCommand" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "custom_field"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.customField) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.customField.isEmpty { + try visitor.visitSingularStringField(value: self.customField, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExamplePluginCommand, rhs: Spark_Connect_ExamplePluginCommand) -> Bool { + if lhs.customField != rhs.customField {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Sources/SparkConnect/expressions.grpc.swift b/Sources/SparkConnect/expressions.grpc.swift new file mode 100644 index 0000000..c7af4ea --- /dev/null +++ b/Sources/SparkConnect/expressions.grpc.swift @@ -0,0 +1,26 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/expressions.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/expressions.pb.swift b/Sources/SparkConnect/expressions.pb.swift new file mode 100644 index 0000000..33a2cad --- /dev/null +++ b/Sources/SparkConnect/expressions.pb.swift @@ -0,0 +1,4922 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/expressions.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
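// Illustrative sketch only (editor's aside, not part of the generated file;
// the variable names are ours): the message structs defined below compose
// into expression trees through their one-of accessors. For example, the
// equivalent of the SQL predicate `age >= 21` could be built as:
//
//   var lhs = Spark_Connect_Expression()
//   lhs.unresolvedAttribute.unparsedIdentifier = "age"
//   var rhs = Spark_Connect_Expression()
//   rhs.literal.integer = 21
//   var predicate = Spark_Connect_Expression()
//   predicate.unresolvedFunction.functionName = ">="
//   predicate.unresolvedFunction.arguments = [lhs, rhs]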
+ +import Foundation +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +/// Expression used to refer to fields, functions and similar. This can be used everywhere +/// expressions in SQL appear. +struct Spark_Connect_Expression: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var common: Spark_Connect_ExpressionCommon { + get {return _storage._common ?? Spark_Connect_ExpressionCommon()} + set {_uniqueStorage()._common = newValue} + } + /// Returns true if `common` has been explicitly set. + var hasCommon: Bool {return _storage._common != nil} + /// Clears the value of `common`. Subsequent reads from it will return its default value. + mutating func clearCommon() {_uniqueStorage()._common = nil} + + var exprType: OneOf_ExprType? { + get {return _storage._exprType} + set {_uniqueStorage()._exprType = newValue} + } + + var literal: Spark_Connect_Expression.Literal { + get { + if case .literal(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.Literal() + } + set {_uniqueStorage()._exprType = .literal(newValue)} + } + + var unresolvedAttribute: Spark_Connect_Expression.UnresolvedAttribute { + get { + if case .unresolvedAttribute(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.UnresolvedAttribute() + } + set {_uniqueStorage()._exprType = .unresolvedAttribute(newValue)} + } + + var unresolvedFunction: Spark_Connect_Expression.UnresolvedFunction { + get { + if case .unresolvedFunction(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.UnresolvedFunction() + } + set {_uniqueStorage()._exprType = .unresolvedFunction(newValue)} + } + + var expressionString: Spark_Connect_Expression.ExpressionString { + get { + if case .expressionString(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.ExpressionString() + } + set {_uniqueStorage()._exprType = .expressionString(newValue)} + } + + var unresolvedStar: Spark_Connect_Expression.UnresolvedStar { + get { + if case .unresolvedStar(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.UnresolvedStar() + } + set {_uniqueStorage()._exprType = .unresolvedStar(newValue)} + } + + var alias: Spark_Connect_Expression.Alias { + get { + if case .alias(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.Alias() + } + set {_uniqueStorage()._exprType = .alias(newValue)} + } + + var cast: Spark_Connect_Expression.Cast { + get { + if case .cast(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.Cast() + } + set {_uniqueStorage()._exprType = .cast(newValue)} + } + + var unresolvedRegex: Spark_Connect_Expression.UnresolvedRegex { + get { + if case .unresolvedRegex(let v)? 
= _storage._exprType {return v} + return Spark_Connect_Expression.UnresolvedRegex() + } + set {_uniqueStorage()._exprType = .unresolvedRegex(newValue)} + } + + var sortOrder: Spark_Connect_Expression.SortOrder { + get { + if case .sortOrder(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.SortOrder() + } + set {_uniqueStorage()._exprType = .sortOrder(newValue)} + } + + var lambdaFunction: Spark_Connect_Expression.LambdaFunction { + get { + if case .lambdaFunction(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.LambdaFunction() + } + set {_uniqueStorage()._exprType = .lambdaFunction(newValue)} + } + + var window: Spark_Connect_Expression.Window { + get { + if case .window(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.Window() + } + set {_uniqueStorage()._exprType = .window(newValue)} + } + + var unresolvedExtractValue: Spark_Connect_Expression.UnresolvedExtractValue { + get { + if case .unresolvedExtractValue(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.UnresolvedExtractValue() + } + set {_uniqueStorage()._exprType = .unresolvedExtractValue(newValue)} + } + + var updateFields: Spark_Connect_Expression.UpdateFields { + get { + if case .updateFields(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.UpdateFields() + } + set {_uniqueStorage()._exprType = .updateFields(newValue)} + } + + var unresolvedNamedLambdaVariable: Spark_Connect_Expression.UnresolvedNamedLambdaVariable { + get { + if case .unresolvedNamedLambdaVariable(let v)? = _storage._exprType {return v} + return Spark_Connect_Expression.UnresolvedNamedLambdaVariable() + } + set {_uniqueStorage()._exprType = .unresolvedNamedLambdaVariable(newValue)} + } + + var commonInlineUserDefinedFunction: Spark_Connect_CommonInlineUserDefinedFunction { + get { + if case .commonInlineUserDefinedFunction(let v)? = _storage._exprType {return v} + return Spark_Connect_CommonInlineUserDefinedFunction() + } + set {_uniqueStorage()._exprType = .commonInlineUserDefinedFunction(newValue)} + } + + var callFunction: Spark_Connect_CallFunction { + get { + if case .callFunction(let v)? = _storage._exprType {return v} + return Spark_Connect_CallFunction() + } + set {_uniqueStorage()._exprType = .callFunction(newValue)} + } + + var namedArgumentExpression: Spark_Connect_NamedArgumentExpression { + get { + if case .namedArgumentExpression(let v)? = _storage._exprType {return v} + return Spark_Connect_NamedArgumentExpression() + } + set {_uniqueStorage()._exprType = .namedArgumentExpression(newValue)} + } + + var mergeAction: Spark_Connect_MergeAction { + get { + if case .mergeAction(let v)? = _storage._exprType {return v} + return Spark_Connect_MergeAction() + } + set {_uniqueStorage()._exprType = .mergeAction(newValue)} + } + + var typedAggregateExpression: Spark_Connect_TypedAggregateExpression { + get { + if case .typedAggregateExpression(let v)? = _storage._exprType {return v} + return Spark_Connect_TypedAggregateExpression() + } + set {_uniqueStorage()._exprType = .typedAggregateExpression(newValue)} + } + + var subqueryExpression: Spark_Connect_SubqueryExpression { + get { + if case .subqueryExpression(let v)? = _storage._exprType {return v} + return Spark_Connect_SubqueryExpression() + } + set {_uniqueStorage()._exprType = .subqueryExpression(newValue)} + } + + /// This field is used to mark extensions to the protocol. When plugins generate arbitrary + /// relations they can add them here. 
During the planning the correct resolution is done. + var `extension`: SwiftProtobuf.Google_Protobuf_Any { + get { + if case .extension(let v)? = _storage._exprType {return v} + return SwiftProtobuf.Google_Protobuf_Any() + } + set {_uniqueStorage()._exprType = .extension(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_ExprType: Equatable, Sendable { + case literal(Spark_Connect_Expression.Literal) + case unresolvedAttribute(Spark_Connect_Expression.UnresolvedAttribute) + case unresolvedFunction(Spark_Connect_Expression.UnresolvedFunction) + case expressionString(Spark_Connect_Expression.ExpressionString) + case unresolvedStar(Spark_Connect_Expression.UnresolvedStar) + case alias(Spark_Connect_Expression.Alias) + case cast(Spark_Connect_Expression.Cast) + case unresolvedRegex(Spark_Connect_Expression.UnresolvedRegex) + case sortOrder(Spark_Connect_Expression.SortOrder) + case lambdaFunction(Spark_Connect_Expression.LambdaFunction) + case window(Spark_Connect_Expression.Window) + case unresolvedExtractValue(Spark_Connect_Expression.UnresolvedExtractValue) + case updateFields(Spark_Connect_Expression.UpdateFields) + case unresolvedNamedLambdaVariable(Spark_Connect_Expression.UnresolvedNamedLambdaVariable) + case commonInlineUserDefinedFunction(Spark_Connect_CommonInlineUserDefinedFunction) + case callFunction(Spark_Connect_CallFunction) + case namedArgumentExpression(Spark_Connect_NamedArgumentExpression) + case mergeAction(Spark_Connect_MergeAction) + case typedAggregateExpression(Spark_Connect_TypedAggregateExpression) + case subqueryExpression(Spark_Connect_SubqueryExpression) + /// This field is used to mark extensions to the protocol. When plugins generate arbitrary + /// relations they can add them here. During the planning the correct resolution is done. + case `extension`(SwiftProtobuf.Google_Protobuf_Any) + + } + + /// Expression for the OVER clause or WINDOW clause. + struct Window: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The window function. + var windowFunction: Spark_Connect_Expression { + get {return _storage._windowFunction ?? Spark_Connect_Expression()} + set {_uniqueStorage()._windowFunction = newValue} + } + /// Returns true if `windowFunction` has been explicitly set. + var hasWindowFunction: Bool {return _storage._windowFunction != nil} + /// Clears the value of `windowFunction`. Subsequent reads from it will return its default value. + mutating func clearWindowFunction() {_uniqueStorage()._windowFunction = nil} + + /// (Optional) The way that input rows are partitioned. + var partitionSpec: [Spark_Connect_Expression] { + get {return _storage._partitionSpec} + set {_uniqueStorage()._partitionSpec = newValue} + } + + /// (Optional) Ordering of rows in a partition. + var orderSpec: [Spark_Connect_Expression.SortOrder] { + get {return _storage._orderSpec} + set {_uniqueStorage()._orderSpec = newValue} + } + + /// (Optional) Window frame in a partition. + /// + /// If not set, it will be treated as 'UnspecifiedFrame'. + var frameSpec: Spark_Connect_Expression.Window.WindowFrame { + get {return _storage._frameSpec ?? Spark_Connect_Expression.Window.WindowFrame()} + set {_uniqueStorage()._frameSpec = newValue} + } + /// Returns true if `frameSpec` has been explicitly set. 
+ var hasFrameSpec: Bool {return _storage._frameSpec != nil} + /// Clears the value of `frameSpec`. Subsequent reads from it will return its default value. + mutating func clearFrameSpec() {_uniqueStorage()._frameSpec = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// The window frame + struct WindowFrame: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The type of the frame. + var frameType: Spark_Connect_Expression.Window.WindowFrame.FrameType { + get {return _storage._frameType} + set {_uniqueStorage()._frameType = newValue} + } + + /// (Required) The lower bound of the frame. + var lower: Spark_Connect_Expression.Window.WindowFrame.FrameBoundary { + get {return _storage._lower ?? Spark_Connect_Expression.Window.WindowFrame.FrameBoundary()} + set {_uniqueStorage()._lower = newValue} + } + /// Returns true if `lower` has been explicitly set. + var hasLower: Bool {return _storage._lower != nil} + /// Clears the value of `lower`. Subsequent reads from it will return its default value. + mutating func clearLower() {_uniqueStorage()._lower = nil} + + /// (Required) The upper bound of the frame. + var upper: Spark_Connect_Expression.Window.WindowFrame.FrameBoundary { + get {return _storage._upper ?? Spark_Connect_Expression.Window.WindowFrame.FrameBoundary()} + set {_uniqueStorage()._upper = newValue} + } + /// Returns true if `upper` has been explicitly set. + var hasUpper: Bool {return _storage._upper != nil} + /// Clears the value of `upper`. Subsequent reads from it will return its default value. + mutating func clearUpper() {_uniqueStorage()._upper = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum FrameType: SwiftProtobuf.Enum, Swift.CaseIterable { + typealias RawValue = Int + case undefined // = 0 + + /// RowFrame treats rows in a partition individually. + case row // = 1 + + /// RangeFrame treats rows in a partition as groups of peers. + /// All rows having the same 'ORDER BY' ordering are considered as peers. + case range // = 2 + case UNRECOGNIZED(Int) + + init() { + self = .undefined + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .undefined + case 1: self = .row + case 2: self = .range + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .undefined: return 0 + case .row: return 1 + case .range: return 2 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + static let allCases: [Spark_Connect_Expression.Window.WindowFrame.FrameType] = [ + .undefined, + .row, + .range, + ] + + } + + struct FrameBoundary: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var boundary: OneOf_Boundary? { + get {return _storage._boundary} + set {_uniqueStorage()._boundary = newValue} + } + + /// CURRENT ROW boundary + var currentRow: Bool { + get { + if case .currentRow(let v)? = _storage._boundary {return v} + return false + } + set {_uniqueStorage()._boundary = .currentRow(newValue)} + } + + /// UNBOUNDED boundary. + /// For lower bound, it will be converted to 'UnboundedPreceding'. + /// for upper bound, it will be converted to 'UnboundedFollowing'. 
+        var unbounded: Bool {
+          get {
+            if case .unbounded(let v)? = _storage._boundary {return v}
+            return false
+          }
+          set {_uniqueStorage()._boundary = .unbounded(newValue)}
+        }
+
+        /// This is an expression for future proofing. We are expecting literals on the server side.
+        var value: Spark_Connect_Expression {
+          get {
+            if case .value(let v)? = _storage._boundary {return v}
+            return Spark_Connect_Expression()
+          }
+          set {_uniqueStorage()._boundary = .value(newValue)}
+        }
+
+        var unknownFields = SwiftProtobuf.UnknownStorage()
+
+        enum OneOf_Boundary: Equatable, Sendable {
+          /// CURRENT ROW boundary
+          case currentRow(Bool)
+          /// UNBOUNDED boundary.
+          /// For lower bound, it will be converted to 'UnboundedPreceding'.
+          /// for upper bound, it will be converted to 'UnboundedFollowing'.
+          case unbounded(Bool)
+          /// This is an expression for future proofing. We are expecting literals on the server side.
+          case value(Spark_Connect_Expression)
+
+        }
+
+        init() {}
+
+        fileprivate var _storage = _StorageClass.defaultInstance
+      }
+
+      init() {}
+
+      fileprivate var _storage = _StorageClass.defaultInstance
+    }
+
+    init() {}
+
+    fileprivate var _storage = _StorageClass.defaultInstance
+  }
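  // Illustrative sketch only (editor's aside, not generated output; the local
  // variable names are ours): a client would compose the ROWS BETWEEN UNBOUNDED
  // PRECEDING AND CURRENT ROW frame described above like this:
  //
  //   var lower = Spark_Connect_Expression.Window.WindowFrame.FrameBoundary()
  //   lower.unbounded = true
  //   var upper = Spark_Connect_Expression.Window.WindowFrame.FrameBoundary()
  //   upper.currentRow = true
  //   var frame = Spark_Connect_Expression.Window.WindowFrame()
  //   frame.frameType = .row
  //   frame.lower = lower
  //   frame.upper = upper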
+  /// SortOrder is used to specify the data ordering; it is normally used in Sort and Window.
+  /// It is an unevaluable expression and cannot be evaluated, so it cannot be used in Projection.
+  struct SortOrder: @unchecked Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) The expression to be sorted.
+    var child: Spark_Connect_Expression {
+      get {return _storage._child ?? Spark_Connect_Expression()}
+      set {_uniqueStorage()._child = newValue}
+    }
+    /// Returns true if `child` has been explicitly set.
+    var hasChild: Bool {return _storage._child != nil}
+    /// Clears the value of `child`. Subsequent reads from it will return its default value.
+    mutating func clearChild() {_uniqueStorage()._child = nil}
+
+    /// (Required) The sort direction, should be ASCENDING or DESCENDING.
+    var direction: Spark_Connect_Expression.SortOrder.SortDirection {
+      get {return _storage._direction}
+      set {_uniqueStorage()._direction = newValue}
+    }
+
+    /// (Required) How to deal with NULLs, should be NULLS_FIRST or NULLS_LAST.
+    var nullOrdering: Spark_Connect_Expression.SortOrder.NullOrdering {
+      get {return _storage._nullOrdering}
+      set {_uniqueStorage()._nullOrdering = newValue}
+    }
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    enum SortDirection: SwiftProtobuf.Enum, Swift.CaseIterable {
+      typealias RawValue = Int
+      case unspecified // = 0
+      case ascending // = 1
+      case descending // = 2
+      case UNRECOGNIZED(Int)
+
+      init() {
+        self = .unspecified
+      }
+
+      init?(rawValue: Int) {
+        switch rawValue {
+        case 0: self = .unspecified
+        case 1: self = .ascending
+        case 2: self = .descending
+        default: self = .UNRECOGNIZED(rawValue)
+        }
+      }
+
+      var rawValue: Int {
+        switch self {
+        case .unspecified: return 0
+        case .ascending: return 1
+        case .descending: return 2
+        case .UNRECOGNIZED(let i): return i
+        }
+      }
+
+      // The compiler won't synthesize support with the UNRECOGNIZED case.
+      static let allCases: [Spark_Connect_Expression.SortOrder.SortDirection] = [
+        .unspecified,
+        .ascending,
+        .descending,
+      ]
+
+    }
+
+    enum NullOrdering: SwiftProtobuf.Enum, Swift.CaseIterable {
+      typealias RawValue = Int
+      case sortNullsUnspecified // = 0
+      case sortNullsFirst // = 1
+      case sortNullsLast // = 2
+      case UNRECOGNIZED(Int)
+
+      init() {
+        self = .sortNullsUnspecified
+      }
+
+      init?(rawValue: Int) {
+        switch rawValue {
+        case 0: self = .sortNullsUnspecified
+        case 1: self = .sortNullsFirst
+        case 2: self = .sortNullsLast
+        default: self = .UNRECOGNIZED(rawValue)
+        }
+      }
+
+      var rawValue: Int {
+        switch self {
+        case .sortNullsUnspecified: return 0
+        case .sortNullsFirst: return 1
+        case .sortNullsLast: return 2
+        case .UNRECOGNIZED(let i): return i
+        }
+      }
+
+      // The compiler won't synthesize support with the UNRECOGNIZED case.
+      static let allCases: [Spark_Connect_Expression.SortOrder.NullOrdering] = [
+        .sortNullsUnspecified,
+        .sortNullsFirst,
+        .sortNullsLast,
+      ]
+
+    }
+
+    init() {}
+
+    fileprivate var _storage = _StorageClass.defaultInstance
+  }
+
+  struct Cast: @unchecked Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) the expression to be cast.
+    var expr: Spark_Connect_Expression {
+      get {return _storage._expr ?? Spark_Connect_Expression()}
+      set {_uniqueStorage()._expr = newValue}
+    }
+    /// Returns true if `expr` has been explicitly set.
+    var hasExpr: Bool {return _storage._expr != nil}
+    /// Clears the value of `expr`. Subsequent reads from it will return its default value.
+    mutating func clearExpr() {_uniqueStorage()._expr = nil}
+
+    /// (Required) the data type that the expr is to be cast to.
+    var castToType: OneOf_CastToType? {
+      get {return _storage._castToType}
+      set {_uniqueStorage()._castToType = newValue}
+    }
+
+    var type: Spark_Connect_DataType {
+      get {
+        if case .type(let v)? = _storage._castToType {return v}
+        return Spark_Connect_DataType()
+      }
+      set {_uniqueStorage()._castToType = .type(newValue)}
+    }
+
+    /// If this is set, Server will use Catalyst parser to parse this string to DataType.
+    var typeStr: String {
+      get {
+        if case .typeStr(let v)? = _storage._castToType {return v}
+        return String()
+      }
+      set {_uniqueStorage()._castToType = .typeStr(newValue)}
+    }
+
+    /// (Optional) The expression evaluation mode.
+    var evalMode: Spark_Connect_Expression.Cast.EvalMode {
+      get {return _storage._evalMode}
+      set {_uniqueStorage()._evalMode = newValue}
+    }
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    /// (Required) the data type that the expr is to be cast to.
+    enum OneOf_CastToType: Equatable, Sendable {
+      case type(Spark_Connect_DataType)
+      /// If this is set, Server will use Catalyst parser to parse this string to DataType.
+ case typeStr(String) + + } + + enum EvalMode: SwiftProtobuf.Enum, Swift.CaseIterable { + typealias RawValue = Int + case unspecified // = 0 + case legacy // = 1 + case ansi // = 2 + case `try` // = 3 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .legacy + case 2: self = .ansi + case 3: self = .try + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .legacy: return 1 + case .ansi: return 2 + case .try: return 3 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + static let allCases: [Spark_Connect_Expression.Cast.EvalMode] = [ + .unspecified, + .legacy, + .ansi, + .try, + ] + + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + struct Literal: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var literalType: Spark_Connect_Expression.Literal.OneOf_LiteralType? = nil + + var null: Spark_Connect_DataType { + get { + if case .null(let v)? = literalType {return v} + return Spark_Connect_DataType() + } + set {literalType = .null(newValue)} + } + + var binary: Data { + get { + if case .binary(let v)? = literalType {return v} + return Data() + } + set {literalType = .binary(newValue)} + } + + var boolean: Bool { + get { + if case .boolean(let v)? = literalType {return v} + return false + } + set {literalType = .boolean(newValue)} + } + + var byte: Int32 { + get { + if case .byte(let v)? = literalType {return v} + return 0 + } + set {literalType = .byte(newValue)} + } + + var short: Int32 { + get { + if case .short(let v)? = literalType {return v} + return 0 + } + set {literalType = .short(newValue)} + } + + var integer: Int32 { + get { + if case .integer(let v)? = literalType {return v} + return 0 + } + set {literalType = .integer(newValue)} + } + + var long: Int64 { + get { + if case .long(let v)? = literalType {return v} + return 0 + } + set {literalType = .long(newValue)} + } + + var float: Float { + get { + if case .float(let v)? = literalType {return v} + return 0 + } + set {literalType = .float(newValue)} + } + + var double: Double { + get { + if case .double(let v)? = literalType {return v} + return 0 + } + set {literalType = .double(newValue)} + } + + var decimal: Spark_Connect_Expression.Literal.Decimal { + get { + if case .decimal(let v)? = literalType {return v} + return Spark_Connect_Expression.Literal.Decimal() + } + set {literalType = .decimal(newValue)} + } + + var string: String { + get { + if case .string(let v)? = literalType {return v} + return String() + } + set {literalType = .string(newValue)} + } + + /// Date in units of days since the UNIX epoch. + var date: Int32 { + get { + if case .date(let v)? = literalType {return v} + return 0 + } + set {literalType = .date(newValue)} + } + + /// Timestamp in units of microseconds since the UNIX epoch. + var timestamp: Int64 { + get { + if case .timestamp(let v)? = literalType {return v} + return 0 + } + set {literalType = .timestamp(newValue)} + } + + /// Timestamp in units of microseconds since the UNIX epoch (without timezone information). + var timestampNtz: Int64 { + get { + if case .timestampNtz(let v)? 
= literalType {return v} + return 0 + } + set {literalType = .timestampNtz(newValue)} + } + + var calendarInterval: Spark_Connect_Expression.Literal.CalendarInterval { + get { + if case .calendarInterval(let v)? = literalType {return v} + return Spark_Connect_Expression.Literal.CalendarInterval() + } + set {literalType = .calendarInterval(newValue)} + } + + var yearMonthInterval: Int32 { + get { + if case .yearMonthInterval(let v)? = literalType {return v} + return 0 + } + set {literalType = .yearMonthInterval(newValue)} + } + + var dayTimeInterval: Int64 { + get { + if case .dayTimeInterval(let v)? = literalType {return v} + return 0 + } + set {literalType = .dayTimeInterval(newValue)} + } + + var array: Spark_Connect_Expression.Literal.Array { + get { + if case .array(let v)? = literalType {return v} + return Spark_Connect_Expression.Literal.Array() + } + set {literalType = .array(newValue)} + } + + var map: Spark_Connect_Expression.Literal.Map { + get { + if case .map(let v)? = literalType {return v} + return Spark_Connect_Expression.Literal.Map() + } + set {literalType = .map(newValue)} + } + + var `struct`: Spark_Connect_Expression.Literal.Struct { + get { + if case .struct(let v)? = literalType {return v} + return Spark_Connect_Expression.Literal.Struct() + } + set {literalType = .struct(newValue)} + } + + var specializedArray: Spark_Connect_Expression.Literal.SpecializedArray { + get { + if case .specializedArray(let v)? = literalType {return v} + return Spark_Connect_Expression.Literal.SpecializedArray() + } + set {literalType = .specializedArray(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_LiteralType: Equatable, @unchecked Sendable { + case null(Spark_Connect_DataType) + case binary(Data) + case boolean(Bool) + case byte(Int32) + case short(Int32) + case integer(Int32) + case long(Int64) + case float(Float) + case double(Double) + case decimal(Spark_Connect_Expression.Literal.Decimal) + case string(String) + /// Date in units of days since the UNIX epoch. + case date(Int32) + /// Timestamp in units of microseconds since the UNIX epoch. + case timestamp(Int64) + /// Timestamp in units of microseconds since the UNIX epoch (without timezone information). + case timestampNtz(Int64) + case calendarInterval(Spark_Connect_Expression.Literal.CalendarInterval) + case yearMonthInterval(Int32) + case dayTimeInterval(Int64) + case array(Spark_Connect_Expression.Literal.Array) + case map(Spark_Connect_Expression.Literal.Map) + case `struct`(Spark_Connect_Expression.Literal.Struct) + case specializedArray(Spark_Connect_Expression.Literal.SpecializedArray) + + } + + struct Decimal: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// the string representation. + var value: String = String() + + /// The maximum number of digits allowed in the value. + /// the maximum precision is 38. + var precision: Int32 { + get {return _precision ?? 0} + set {_precision = newValue} + } + /// Returns true if `precision` has been explicitly set. + var hasPrecision: Bool {return self._precision != nil} + /// Clears the value of `precision`. Subsequent reads from it will return its default value. + mutating func clearPrecision() {self._precision = nil} + + /// declared scale of decimal literal + var scale: Int32 { + get {return _scale ?? 
0} + set {_scale = newValue} + } + /// Returns true if `scale` has been explicitly set. + var hasScale: Bool {return self._scale != nil} + /// Clears the value of `scale`. Subsequent reads from it will return its default value. + mutating func clearScale() {self._scale = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _precision: Int32? = nil + fileprivate var _scale: Int32? = nil + } + + struct CalendarInterval: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var months: Int32 = 0 + + var days: Int32 = 0 + + var microseconds: Int64 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Array: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var elementType: Spark_Connect_DataType { + get {return _elementType ?? Spark_Connect_DataType()} + set {_elementType = newValue} + } + /// Returns true if `elementType` has been explicitly set. + var hasElementType: Bool {return self._elementType != nil} + /// Clears the value of `elementType`. Subsequent reads from it will return its default value. + mutating func clearElementType() {self._elementType = nil} + + var elements: [Spark_Connect_Expression.Literal] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _elementType: Spark_Connect_DataType? = nil + } + + struct Map: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var keyType: Spark_Connect_DataType { + get {return _keyType ?? Spark_Connect_DataType()} + set {_keyType = newValue} + } + /// Returns true if `keyType` has been explicitly set. + var hasKeyType: Bool {return self._keyType != nil} + /// Clears the value of `keyType`. Subsequent reads from it will return its default value. + mutating func clearKeyType() {self._keyType = nil} + + var valueType: Spark_Connect_DataType { + get {return _valueType ?? Spark_Connect_DataType()} + set {_valueType = newValue} + } + /// Returns true if `valueType` has been explicitly set. + var hasValueType: Bool {return self._valueType != nil} + /// Clears the value of `valueType`. Subsequent reads from it will return its default value. + mutating func clearValueType() {self._valueType = nil} + + var keys: [Spark_Connect_Expression.Literal] = [] + + var values: [Spark_Connect_Expression.Literal] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _keyType: Spark_Connect_DataType? = nil + fileprivate var _valueType: Spark_Connect_DataType? = nil + } + + struct Struct: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var structType: Spark_Connect_DataType { + get {return _structType ?? Spark_Connect_DataType()} + set {_structType = newValue} + } + /// Returns true if `structType` has been explicitly set. + var hasStructType: Bool {return self._structType != nil} + /// Clears the value of `structType`. Subsequent reads from it will return its default value. 
+ mutating func clearStructType() {self._structType = nil} + + var elements: [Spark_Connect_Expression.Literal] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _structType: Spark_Connect_DataType? = nil + } + + struct SpecializedArray: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var valueType: Spark_Connect_Expression.Literal.SpecializedArray.OneOf_ValueType? = nil + + var bools: Spark_Connect_Bools { + get { + if case .bools(let v)? = valueType {return v} + return Spark_Connect_Bools() + } + set {valueType = .bools(newValue)} + } + + var ints: Spark_Connect_Ints { + get { + if case .ints(let v)? = valueType {return v} + return Spark_Connect_Ints() + } + set {valueType = .ints(newValue)} + } + + var longs: Spark_Connect_Longs { + get { + if case .longs(let v)? = valueType {return v} + return Spark_Connect_Longs() + } + set {valueType = .longs(newValue)} + } + + var floats: Spark_Connect_Floats { + get { + if case .floats(let v)? = valueType {return v} + return Spark_Connect_Floats() + } + set {valueType = .floats(newValue)} + } + + var doubles: Spark_Connect_Doubles { + get { + if case .doubles(let v)? = valueType {return v} + return Spark_Connect_Doubles() + } + set {valueType = .doubles(newValue)} + } + + var strings: Spark_Connect_Strings { + get { + if case .strings(let v)? = valueType {return v} + return Spark_Connect_Strings() + } + set {valueType = .strings(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_ValueType: Equatable, Sendable { + case bools(Spark_Connect_Bools) + case ints(Spark_Connect_Ints) + case longs(Spark_Connect_Longs) + case floats(Spark_Connect_Floats) + case doubles(Spark_Connect_Doubles) + case strings(Spark_Connect_Strings) + + } + + init() {} + } + + init() {} + } + + /// An unresolved attribute that is not explicitly bound to a specific column, but the column + /// is resolved during analysis by name. + struct UnresolvedAttribute: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) An identifier that will be parsed by Catalyst parser. This should follow the + /// Spark SQL identifier syntax. + var unparsedIdentifier: String = String() + + /// (Optional) The id of corresponding connect plan. + var planID: Int64 { + get {return _planID ?? 0} + set {_planID = newValue} + } + /// Returns true if `planID` has been explicitly set. + var hasPlanID: Bool {return self._planID != nil} + /// Clears the value of `planID`. Subsequent reads from it will return its default value. + mutating func clearPlanID() {self._planID = nil} + + /// (Optional) The requested column is a metadata column. + var isMetadataColumn: Bool { + get {return _isMetadataColumn ?? false} + set {_isMetadataColumn = newValue} + } + /// Returns true if `isMetadataColumn` has been explicitly set. + var hasIsMetadataColumn: Bool {return self._isMetadataColumn != nil} + /// Clears the value of `isMetadataColumn`. Subsequent reads from it will return its default value. + mutating func clearIsMetadataColumn() {self._isMetadataColumn = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _planID: Int64? 
= nil
+    fileprivate var _isMetadataColumn: Bool? = nil
+  }
+
+  /// An unresolved function is not explicitly bound to one explicit function, but the function
+  /// is resolved during analysis following Spark's name resolution rules.
+  struct UnresolvedFunction: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) Name (or unparsed name for a user defined function) for the unresolved function.
+    var functionName: String = String()
+
+    /// (Optional) Function arguments. Empty arguments are allowed.
+    var arguments: [Spark_Connect_Expression] = []
+
+    /// (Required) Indicate if this function should be applied on distinct values.
+    var isDistinct: Bool = false
+
+    /// (Required) Indicate if this is a user defined function.
+    ///
+    /// When it is not a user defined function, Connect will use the function name directly.
+    /// When it is a user defined function, Connect will parse the function name first.
+    var isUserDefinedFunction: Bool = false
+
+    /// (Optional) Indicate if this function is defined in the internal function registry.
+    /// If not set, the server will try to look up the function in the internal function registry
+    /// and decide appropriately.
+    var isInternal: Bool {
+      get {return _isInternal ?? false}
+      set {_isInternal = newValue}
+    }
+    /// Returns true if `isInternal` has been explicitly set.
+    var hasIsInternal: Bool {return self._isInternal != nil}
+    /// Clears the value of `isInternal`. Subsequent reads from it will return its default value.
+    mutating func clearIsInternal() {self._isInternal = nil}
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _isInternal: Bool? = nil
+  }
+
+  /// Expression as string.
+  struct ExpressionString: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) A SQL expression that will be parsed by Catalyst parser.
+    var expression: String = String()
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  /// UnresolvedStar is used to expand all the fields of a relation or struct.
+  struct UnresolvedStar: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Optional) The target of the expansion.
+    ///
+    /// If set, it should end with '.*' and will be parsed by 'parseAttributeName'
+    /// on the server side.
+    var unparsedTarget: String {
+      get {return _unparsedTarget ?? String()}
+      set {_unparsedTarget = newValue}
+    }
+    /// Returns true if `unparsedTarget` has been explicitly set.
+    var hasUnparsedTarget: Bool {return self._unparsedTarget != nil}
+    /// Clears the value of `unparsedTarget`. Subsequent reads from it will return its default value.
+    mutating func clearUnparsedTarget() {self._unparsedTarget = nil}
+
+    /// (Optional) The id of corresponding connect plan.
+    var planID: Int64 {
+      get {return _planID ?? 0}
+      set {_planID = newValue}
+    }
+    /// Returns true if `planID` has been explicitly set.
+    var hasPlanID: Bool {return self._planID != nil}
+    /// Clears the value of `planID`. Subsequent reads from it will return its default value.
+    mutating func clearPlanID() {self._planID = nil}
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _unparsedTarget: String? = nil
+    fileprivate var _planID: Int64? = nil
+  }
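  // Illustrative sketch only (editor's aside, not generated output; the local
  // variable name is ours): UnresolvedStar above is how a client requests `*`
  // or `struct.*` expansion, e.g.:
  //
  //   var star = Spark_Connect_Expression()
  //   star.unresolvedStar = Spark_Connect_Expression.UnresolvedStar()   // bare `*`
  //   // or, to expand a single struct column:
  //   star.unresolvedStar.unparsedTarget = "person.*"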
+ mutating func clearPlanID() {self._planID = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _unparsedTarget: String? = nil + fileprivate var _planID: Int64? = nil + } + + /// Represents all of the input attributes to a given relational operator, for example in + /// "SELECT `(id)?+.+` FROM ...". + struct UnresolvedRegex: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The column name used to extract column with regex. + var colName: String = String() + + /// (Optional) The id of corresponding connect plan. + var planID: Int64 { + get {return _planID ?? 0} + set {_planID = newValue} + } + /// Returns true if `planID` has been explicitly set. + var hasPlanID: Bool {return self._planID != nil} + /// Clears the value of `planID`. Subsequent reads from it will return its default value. + mutating func clearPlanID() {self._planID = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _planID: Int64? = nil + } + + /// Extracts a value or values from an Expression + struct UnresolvedExtractValue: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The expression to extract value from, can be + /// Map, Array, Struct or array of Structs. + var child: Spark_Connect_Expression { + get {return _storage._child ?? Spark_Connect_Expression()} + set {_uniqueStorage()._child = newValue} + } + /// Returns true if `child` has been explicitly set. + var hasChild: Bool {return _storage._child != nil} + /// Clears the value of `child`. Subsequent reads from it will return its default value. + mutating func clearChild() {_uniqueStorage()._child = nil} + + /// (Required) The expression to describe the extraction, can be + /// key of Map, index of Array, field name of Struct. + var extraction: Spark_Connect_Expression { + get {return _storage._extraction ?? Spark_Connect_Expression()} + set {_uniqueStorage()._extraction = newValue} + } + /// Returns true if `extraction` has been explicitly set. + var hasExtraction: Bool {return _storage._extraction != nil} + /// Clears the value of `extraction`. Subsequent reads from it will return its default value. + mutating func clearExtraction() {_uniqueStorage()._extraction = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + /// Add, replace or drop a field of `StructType` expression by name. + struct UpdateFields: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The struct expression. + var structExpression: Spark_Connect_Expression { + get {return _storage._structExpression ?? Spark_Connect_Expression()} + set {_uniqueStorage()._structExpression = newValue} + } + /// Returns true if `structExpression` has been explicitly set. + var hasStructExpression: Bool {return _storage._structExpression != nil} + /// Clears the value of `structExpression`. Subsequent reads from it will return its default value. 
+    mutating func clearStructExpression() {_uniqueStorage()._structExpression = nil}
+
+    /// (Required) The field name.
+    var fieldName: String {
+      get {return _storage._fieldName}
+      set {_uniqueStorage()._fieldName = newValue}
+    }
+
+    /// (Optional) The expression to add or replace.
+    ///
+    /// When not set, it means this field will be dropped.
+    var valueExpression: Spark_Connect_Expression {
+      get {return _storage._valueExpression ?? Spark_Connect_Expression()}
+      set {_uniqueStorage()._valueExpression = newValue}
+    }
+    /// Returns true if `valueExpression` has been explicitly set.
+    var hasValueExpression: Bool {return _storage._valueExpression != nil}
+    /// Clears the value of `valueExpression`. Subsequent reads from it will return its default value.
+    mutating func clearValueExpression() {_uniqueStorage()._valueExpression = nil}
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _storage = _StorageClass.defaultInstance
+  }
+
+  struct Alias: @unchecked Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) The expression that the alias will be added on.
+    var expr: Spark_Connect_Expression {
+      get {return _storage._expr ?? Spark_Connect_Expression()}
+      set {_uniqueStorage()._expr = newValue}
+    }
+    /// Returns true if `expr` has been explicitly set.
+    var hasExpr: Bool {return _storage._expr != nil}
+    /// Clears the value of `expr`. Subsequent reads from it will return its default value.
+    mutating func clearExpr() {_uniqueStorage()._expr = nil}
+
+    /// (Required) a list of name parts for the alias.
+    ///
+    /// Scalar columns have only one name part.
+    var name: [String] {
+      get {return _storage._name}
+      set {_uniqueStorage()._name = newValue}
+    }
+
+    /// (Optional) Alias metadata expressed as a JSON map.
+    var metadata: String {
+      get {return _storage._metadata ?? String()}
+      set {_uniqueStorage()._metadata = newValue}
+    }
+    /// Returns true if `metadata` has been explicitly set.
+    var hasMetadata: Bool {return _storage._metadata != nil}
+    /// Clears the value of `metadata`. Subsequent reads from it will return its default value.
+    mutating func clearMetadata() {_uniqueStorage()._metadata = nil}
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _storage = _StorageClass.defaultInstance
+  }
+
+  struct LambdaFunction: @unchecked Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) The lambda function.
+    ///
+    /// The function body should use 'UnresolvedAttribute' as arguments; the server side will
+    /// replace 'UnresolvedAttribute' with 'UnresolvedNamedLambdaVariable'.
+    var function: Spark_Connect_Expression {
+      get {return _storage._function ?? Spark_Connect_Expression()}
+      set {_uniqueStorage()._function = newValue}
+    }
+    /// Returns true if `function` has been explicitly set.
+    var hasFunction: Bool {return _storage._function != nil}
+    /// Clears the value of `function`. Subsequent reads from it will return its default value.
+    mutating func clearFunction() {_uniqueStorage()._function = nil}
+
+    /// (Required) Function variables. Must contain 1 to 3 variables.
+ var arguments: [Spark_Connect_Expression.UnresolvedNamedLambdaVariable] { + get {return _storage._arguments} + set {_uniqueStorage()._arguments = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + struct UnresolvedNamedLambdaVariable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) a list of name parts for the variable. Must not be empty. + var nameParts: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +struct Spark_Connect_ExpressionCommon: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Keep the information of the origin for this expression such as stacktrace. + var origin: Spark_Connect_Origin { + get {return _origin ?? Spark_Connect_Origin()} + set {_origin = newValue} + } + /// Returns true if `origin` has been explicitly set. + var hasOrigin: Bool {return self._origin != nil} + /// Clears the value of `origin`. Subsequent reads from it will return its default value. + mutating func clearOrigin() {self._origin = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _origin: Spark_Connect_Origin? = nil +} + +struct Spark_Connect_CommonInlineUserDefinedFunction: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Name of the user-defined function. + var functionName: String = String() + + /// (Optional) Indicate if the user-defined function is deterministic. + var deterministic: Bool = false + + /// (Optional) Function arguments. Empty arguments are allowed. + var arguments: [Spark_Connect_Expression] = [] + + /// (Required) Indicate the function type of the user-defined function. + var function: Spark_Connect_CommonInlineUserDefinedFunction.OneOf_Function? = nil + + var pythonUdf: Spark_Connect_PythonUDF { + get { + if case .pythonUdf(let v)? = function {return v} + return Spark_Connect_PythonUDF() + } + set {function = .pythonUdf(newValue)} + } + + var scalarScalaUdf: Spark_Connect_ScalarScalaUDF { + get { + if case .scalarScalaUdf(let v)? = function {return v} + return Spark_Connect_ScalarScalaUDF() + } + set {function = .scalarScalaUdf(newValue)} + } + + var javaUdf: Spark_Connect_JavaUDF { + get { + if case .javaUdf(let v)? = function {return v} + return Spark_Connect_JavaUDF() + } + set {function = .javaUdf(newValue)} + } + + /// (Required) Indicate if this function should be applied on distinct values. + var isDistinct: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// (Required) Indicate the function type of the user-defined function. + enum OneOf_Function: Equatable, Sendable { + case pythonUdf(Spark_Connect_PythonUDF) + case scalarScalaUdf(Spark_Connect_ScalarScalaUDF) + case javaUdf(Spark_Connect_JavaUDF) + + } + + init() {} +} + +struct Spark_Connect_PythonUDF: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Output type of the Python UDF + var outputType: Spark_Connect_DataType { + get {return _outputType ?? Spark_Connect_DataType()} + set {_outputType = newValue} + } + /// Returns true if `outputType` has been explicitly set. + var hasOutputType: Bool {return self._outputType != nil} + /// Clears the value of `outputType`. Subsequent reads from it will return its default value. + mutating func clearOutputType() {self._outputType = nil} + + /// (Required) EvalType of the Python UDF + var evalType: Int32 = 0 + + /// (Required) The encoded commands of the Python UDF + var command: Data = Data() + + /// (Required) Python version being used in the client. + var pythonVer: String = String() + + /// (Optional) Additional includes for the Python UDF. + var additionalIncludes: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _outputType: Spark_Connect_DataType? = nil +} + +struct Spark_Connect_ScalarScalaUDF: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Serialized JVM object containing UDF definition, input encoders and output encoder + var payload: Data = Data() + + /// (Optional) Input type(s) of the UDF + var inputTypes: [Spark_Connect_DataType] = [] + + /// (Required) Output type of the UDF + var outputType: Spark_Connect_DataType { + get {return _outputType ?? Spark_Connect_DataType()} + set {_outputType = newValue} + } + /// Returns true if `outputType` has been explicitly set. + var hasOutputType: Bool {return self._outputType != nil} + /// Clears the value of `outputType`. Subsequent reads from it will return its default value. + mutating func clearOutputType() {self._outputType = nil} + + /// (Required) True if the UDF can return null value + var nullable: Bool = false + + /// (Required) Indicate if the UDF is an aggregate function + var aggregate: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _outputType: Spark_Connect_DataType? = nil +} + +struct Spark_Connect_JavaUDF: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Fully qualified name of Java class + var className: String = String() + + /// (Optional) Output type of the Java UDF + var outputType: Spark_Connect_DataType { + get {return _outputType ?? Spark_Connect_DataType()} + set {_outputType = newValue} + } + /// Returns true if `outputType` has been explicitly set. + var hasOutputType: Bool {return self._outputType != nil} + /// Clears the value of `outputType`. Subsequent reads from it will return its default value. + mutating func clearOutputType() {self._outputType = nil} + + /// (Required) Indicate if the Java user-defined function is an aggregate function + var aggregate: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _outputType: Spark_Connect_DataType? = nil +} + +struct Spark_Connect_TypedAggregateExpression: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The aggregate function object packed into bytes. + var scalarScalaUdf: Spark_Connect_ScalarScalaUDF { + get {return _scalarScalaUdf ?? Spark_Connect_ScalarScalaUDF()} + set {_scalarScalaUdf = newValue} + } + /// Returns true if `scalarScalaUdf` has been explicitly set. + var hasScalarScalaUdf: Bool {return self._scalarScalaUdf != nil} + /// Clears the value of `scalarScalaUdf`. Subsequent reads from it will return its default value. + mutating func clearScalarScalaUdf() {self._scalarScalaUdf = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _scalarScalaUdf: Spark_Connect_ScalarScalaUDF? = nil +} + +struct Spark_Connect_CallFunction: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Unparsed name of the SQL function. + var functionName: String = String() + + /// (Optional) Function arguments. Empty arguments are allowed. + var arguments: [Spark_Connect_Expression] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +struct Spark_Connect_NamedArgumentExpression: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The key of the named argument. + var key: String { + get {return _storage._key} + set {_uniqueStorage()._key = newValue} + } + + /// (Required) The value expression of the named argument. + var value: Spark_Connect_Expression { + get {return _storage._value ?? Spark_Connect_Expression()} + set {_uniqueStorage()._value = newValue} + } + /// Returns true if `value` has been explicitly set. + var hasValue: Bool {return _storage._value != nil} + /// Clears the value of `value`. Subsequent reads from it will return its default value. + mutating func clearValue() {_uniqueStorage()._value = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +struct Spark_Connect_MergeAction: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The action type of the merge action. + var actionType: Spark_Connect_MergeAction.ActionType { + get {return _storage._actionType} + set {_uniqueStorage()._actionType = newValue} + } + + /// (Optional) The condition expression of the merge action. + var condition: Spark_Connect_Expression { + get {return _storage._condition ?? Spark_Connect_Expression()} + set {_uniqueStorage()._condition = newValue} + } + /// Returns true if `condition` has been explicitly set. + var hasCondition: Bool {return _storage._condition != nil} + /// Clears the value of `condition`. Subsequent reads from it will return its default value. + mutating func clearCondition() {_uniqueStorage()._condition = nil} + + /// (Optional) The assignments of the merge action. Required for ActionTypes INSERT and UPDATE. 
+ var assignments: [Spark_Connect_MergeAction.Assignment] { + get {return _storage._assignments} + set {_uniqueStorage()._assignments = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum ActionType: SwiftProtobuf.Enum, Swift.CaseIterable { + typealias RawValue = Int + case invalid // = 0 + case delete // = 1 + case insert // = 2 + case insertStar // = 3 + case update // = 4 + case updateStar // = 5 + case UNRECOGNIZED(Int) + + init() { + self = .invalid + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .invalid + case 1: self = .delete + case 2: self = .insert + case 3: self = .insertStar + case 4: self = .update + case 5: self = .updateStar + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .invalid: return 0 + case .delete: return 1 + case .insert: return 2 + case .insertStar: return 3 + case .update: return 4 + case .updateStar: return 5 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + static let allCases: [Spark_Connect_MergeAction.ActionType] = [ + .invalid, + .delete, + .insert, + .insertStar, + .update, + .updateStar, + ] + + } + + struct Assignment: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The key of the assignment. + var key: Spark_Connect_Expression { + get {return _key ?? Spark_Connect_Expression()} + set {_key = newValue} + } + /// Returns true if `key` has been explicitly set. + var hasKey: Bool {return self._key != nil} + /// Clears the value of `key`. Subsequent reads from it will return its default value. + mutating func clearKey() {self._key = nil} + + /// (Required) The value of the assignment. + var value: Spark_Connect_Expression { + get {return _value ?? Spark_Connect_Expression()} + set {_value = newValue} + } + /// Returns true if `value` has been explicitly set. + var hasValue: Bool {return self._value != nil} + /// Clears the value of `value`. Subsequent reads from it will return its default value. + mutating func clearValue() {self._value = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _key: Spark_Connect_Expression? = nil + fileprivate var _value: Spark_Connect_Expression? = nil + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +struct Spark_Connect_SubqueryExpression: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The ID of the corresponding connect plan. + var planID: Int64 = 0 + + /// (Required) The type of the subquery. + var subqueryType: Spark_Connect_SubqueryExpression.SubqueryType = .unknown + + /// (Optional) Options specific to table arguments. + var tableArgOptions: Spark_Connect_SubqueryExpression.TableArgOptions { + get {return _tableArgOptions ?? Spark_Connect_SubqueryExpression.TableArgOptions()} + set {_tableArgOptions = newValue} + } + /// Returns true if `tableArgOptions` has been explicitly set. + var hasTableArgOptions: Bool {return self._tableArgOptions != nil} + /// Clears the value of `tableArgOptions`. Subsequent reads from it will return its default value. 
+ mutating func clearTableArgOptions() {self._tableArgOptions = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum SubqueryType: SwiftProtobuf.Enum, Swift.CaseIterable { + typealias RawValue = Int + case unknown // = 0 + case scalar // = 1 + case exists // = 2 + case tableArg // = 3 + case UNRECOGNIZED(Int) + + init() { + self = .unknown + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unknown + case 1: self = .scalar + case 2: self = .exists + case 3: self = .tableArg + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unknown: return 0 + case .scalar: return 1 + case .exists: return 2 + case .tableArg: return 3 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + static let allCases: [Spark_Connect_SubqueryExpression.SubqueryType] = [ + .unknown, + .scalar, + .exists, + .tableArg, + ] + + } + + /// Nested message for table argument options. + struct TableArgOptions: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) The way that input rows are partitioned. + var partitionSpec: [Spark_Connect_Expression] = [] + + /// (Optional) Ordering of rows in a partition. + var orderSpec: [Spark_Connect_Expression.SortOrder] = [] + + /// (Optional) Whether this is a single partition. + var withSinglePartition: Bool { + get {return _withSinglePartition ?? false} + set {_withSinglePartition = newValue} + } + /// Returns true if `withSinglePartition` has been explicitly set. + var hasWithSinglePartition: Bool {return self._withSinglePartition != nil} + /// Clears the value of `withSinglePartition`. Subsequent reads from it will return its default value. + mutating func clearWithSinglePartition() {self._withSinglePartition = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _withSinglePartition: Bool? = nil + } + + init() {} + + fileprivate var _tableArgOptions: Spark_Connect_SubqueryExpression.TableArgOptions? = nil +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. 
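+// A minimal usage sketch (editorial illustration, not part of the generated
+// API): the message structs above are plain SwiftProtobuf value types, so an
+// expression tree such as `max(age) AS max_age` can be assembled through the
+// generated oneof convenience accessors and round-tripped over the wire via
+// `serializedData()` / `init(serializedBytes:)` from SwiftProtobuf's `Message`
+// API. The column, function, and alias names are arbitrary placeholders.
+//
+//     var col = Spark_Connect_Expression()
+//     col.unresolvedAttribute.unparsedIdentifier = "age"
+//
+//     var fn = Spark_Connect_Expression()
+//     fn.unresolvedFunction.functionName = "max"
+//     fn.unresolvedFunction.arguments = [col]
+//
+//     var aliased = Spark_Connect_Expression()
+//     aliased.alias.expr = fn
+//     aliased.alias.name = ["max_age"]
+//
+//     let bytes = try aliased.serializedData()      // binary wire format
+//     let decoded = try Spark_Connect_Expression(serializedBytes: bytes)
+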
+ +fileprivate let _protobuf_package = "spark.connect" + +extension Spark_Connect_Expression: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Expression" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 18: .same(proto: "common"), + 1: .same(proto: "literal"), + 2: .standard(proto: "unresolved_attribute"), + 3: .standard(proto: "unresolved_function"), + 4: .standard(proto: "expression_string"), + 5: .standard(proto: "unresolved_star"), + 6: .same(proto: "alias"), + 7: .same(proto: "cast"), + 8: .standard(proto: "unresolved_regex"), + 9: .standard(proto: "sort_order"), + 10: .standard(proto: "lambda_function"), + 11: .same(proto: "window"), + 12: .standard(proto: "unresolved_extract_value"), + 13: .standard(proto: "update_fields"), + 14: .standard(proto: "unresolved_named_lambda_variable"), + 15: .standard(proto: "common_inline_user_defined_function"), + 16: .standard(proto: "call_function"), + 17: .standard(proto: "named_argument_expression"), + 19: .standard(proto: "merge_action"), + 20: .standard(proto: "typed_aggregate_expression"), + 21: .standard(proto: "subquery_expression"), + 999: .same(proto: "extension"), + ] + + fileprivate class _StorageClass { + var _common: Spark_Connect_ExpressionCommon? = nil + var _exprType: Spark_Connect_Expression.OneOf_ExprType? + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _common = source._common + _exprType = source._exprType + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_Expression.Literal? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .literal(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .literal(v) + } + }() + case 2: try { + var v: Spark_Connect_Expression.UnresolvedAttribute? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .unresolvedAttribute(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .unresolvedAttribute(v) + } + }() + case 3: try { + var v: Spark_Connect_Expression.UnresolvedFunction? 
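+        // The lines below seed `v` with the message currently held by the oneof
+        // when the incoming field matches the active case, so a repeated
+        // occurrence of the same message field merges (per proto semantics)
+        // instead of replacing it. If a *different* case was already set,
+        // `handleConflictingOneOf()` applies the decoder's policy: the binary
+        // decoder keeps the last value seen, the JSON decoder reports an error.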
+ var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .unresolvedFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .unresolvedFunction(v) + } + }() + case 4: try { + var v: Spark_Connect_Expression.ExpressionString? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .expressionString(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .expressionString(v) + } + }() + case 5: try { + var v: Spark_Connect_Expression.UnresolvedStar? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .unresolvedStar(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .unresolvedStar(v) + } + }() + case 6: try { + var v: Spark_Connect_Expression.Alias? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .alias(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .alias(v) + } + }() + case 7: try { + var v: Spark_Connect_Expression.Cast? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .cast(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .cast(v) + } + }() + case 8: try { + var v: Spark_Connect_Expression.UnresolvedRegex? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .unresolvedRegex(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .unresolvedRegex(v) + } + }() + case 9: try { + var v: Spark_Connect_Expression.SortOrder? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .sortOrder(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .sortOrder(v) + } + }() + case 10: try { + var v: Spark_Connect_Expression.LambdaFunction? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .lambdaFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .lambdaFunction(v) + } + }() + case 11: try { + var v: Spark_Connect_Expression.Window? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .window(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .window(v) + } + }() + case 12: try { + var v: Spark_Connect_Expression.UnresolvedExtractValue? 
+ var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .unresolvedExtractValue(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .unresolvedExtractValue(v) + } + }() + case 13: try { + var v: Spark_Connect_Expression.UpdateFields? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .updateFields(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .updateFields(v) + } + }() + case 14: try { + var v: Spark_Connect_Expression.UnresolvedNamedLambdaVariable? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .unresolvedNamedLambdaVariable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .unresolvedNamedLambdaVariable(v) + } + }() + case 15: try { + var v: Spark_Connect_CommonInlineUserDefinedFunction? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .commonInlineUserDefinedFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .commonInlineUserDefinedFunction(v) + } + }() + case 16: try { + var v: Spark_Connect_CallFunction? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .callFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .callFunction(v) + } + }() + case 17: try { + var v: Spark_Connect_NamedArgumentExpression? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .namedArgumentExpression(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .namedArgumentExpression(v) + } + }() + case 18: try { try decoder.decodeSingularMessageField(value: &_storage._common) }() + case 19: try { + var v: Spark_Connect_MergeAction? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .mergeAction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .mergeAction(v) + } + }() + case 20: try { + var v: Spark_Connect_TypedAggregateExpression? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .typedAggregateExpression(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .typedAggregateExpression(v) + } + }() + case 21: try { + var v: Spark_Connect_SubqueryExpression? 
+ var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .subqueryExpression(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .subqueryExpression(v) + } + }() + case 999: try { + var v: SwiftProtobuf.Google_Protobuf_Any? + var hadOneofValue = false + if let current = _storage._exprType { + hadOneofValue = true + if case .extension(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._exprType = .extension(v) + } + }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch _storage._exprType { + case .literal?: try { + guard case .literal(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .unresolvedAttribute?: try { + guard case .unresolvedAttribute(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .unresolvedFunction?: try { + guard case .unresolvedFunction(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .expressionString?: try { + guard case .expressionString(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .unresolvedStar?: try { + guard case .unresolvedStar(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .alias?: try { + guard case .alias(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .cast?: try { + guard case .cast(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case .unresolvedRegex?: try { + guard case .unresolvedRegex(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + }() + case .sortOrder?: try { + guard case .sortOrder(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case .lambdaFunction?: try { + guard case .lambdaFunction(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 10) + }() + case .window?: try { + guard case .window(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 11) + }() + case .unresolvedExtractValue?: try { + guard case .unresolvedExtractValue(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 12) + }() + case .updateFields?: try { + guard case .updateFields(let v)? 
= _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 13) + }() + case .unresolvedNamedLambdaVariable?: try { + guard case .unresolvedNamedLambdaVariable(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + }() + case .commonInlineUserDefinedFunction?: try { + guard case .commonInlineUserDefinedFunction(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 15) + }() + case .callFunction?: try { + guard case .callFunction(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 16) + }() + case .namedArgumentExpression?: try { + guard case .namedArgumentExpression(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 17) + }() + default: break + } + try { if let v = _storage._common { + try visitor.visitSingularMessageField(value: v, fieldNumber: 18) + } }() + switch _storage._exprType { + case .mergeAction?: try { + guard case .mergeAction(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 19) + }() + case .typedAggregateExpression?: try { + guard case .typedAggregateExpression(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 20) + }() + case .subqueryExpression?: try { + guard case .subqueryExpression(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 21) + }() + case .extension?: try { + guard case .extension(let v)? = _storage._exprType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 999) + }() + default: break + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression, rhs: Spark_Connect_Expression) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._common != rhs_storage._common {return false} + if _storage._exprType != rhs_storage._exprType {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Window: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".Window" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "window_function"), + 2: .standard(proto: "partition_spec"), + 3: .standard(proto: "order_spec"), + 4: .standard(proto: "frame_spec"), + ] + + fileprivate class _StorageClass { + var _windowFunction: Spark_Connect_Expression? = nil + var _partitionSpec: [Spark_Connect_Expression] = [] + var _orderSpec: [Spark_Connect_Expression.SortOrder] = [] + var _frameSpec: Spark_Connect_Expression.Window.WindowFrame? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. 
+ // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _windowFunction = source._windowFunction + _partitionSpec = source._partitionSpec + _orderSpec = source._orderSpec + _frameSpec = source._frameSpec + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._windowFunction) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._partitionSpec) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._orderSpec) }() + case 4: try { try decoder.decodeSingularMessageField(value: &_storage._frameSpec) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._windowFunction { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._partitionSpec.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._partitionSpec, fieldNumber: 2) + } + if !_storage._orderSpec.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._orderSpec, fieldNumber: 3) + } + try { if let v = _storage._frameSpec { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Window, rhs: Spark_Connect_Expression.Window) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._windowFunction != rhs_storage._windowFunction {return false} + if _storage._partitionSpec != rhs_storage._partitionSpec {return false} + if _storage._orderSpec != rhs_storage._orderSpec {return false} + if _storage._frameSpec != rhs_storage._frameSpec {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Window.WindowFrame: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Window.protoMessageName + ".WindowFrame" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "frame_type"), + 2: .same(proto: "lower"), + 3: .same(proto: "upper"), + ] + + fileprivate class _StorageClass { + var _frameType: Spark_Connect_Expression.Window.WindowFrame.FrameType = .undefined + var _lower: Spark_Connect_Expression.Window.WindowFrame.FrameBoundary? = nil + var _upper: Spark_Connect_Expression.Window.WindowFrame.FrameBoundary? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _frameType = source._frameType + _lower = source._lower + _upper = source._upper + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularEnumField(value: &_storage._frameType) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._lower) }() + case 3: try { try decoder.decodeSingularMessageField(value: &_storage._upper) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if _storage._frameType != .undefined { + try visitor.visitSingularEnumField(value: _storage._frameType, fieldNumber: 1) + } + try { if let v = _storage._lower { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = _storage._upper { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Window.WindowFrame, rhs: Spark_Connect_Expression.Window.WindowFrame) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._frameType != rhs_storage._frameType {return false} + if _storage._lower != rhs_storage._lower {return false} + if _storage._upper != rhs_storage._upper {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Window.WindowFrame.FrameType: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "FRAME_TYPE_UNDEFINED"), + 1: .same(proto: "FRAME_TYPE_ROW"), + 2: .same(proto: "FRAME_TYPE_RANGE"), + ] +} + +extension Spark_Connect_Expression.Window.WindowFrame.FrameBoundary: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Window.WindowFrame.protoMessageName + ".FrameBoundary" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "current_row"), + 2: .same(proto: "unbounded"), + 3: .same(proto: "value"), + ] + + fileprivate class _StorageClass { + var _boundary: Spark_Connect_Expression.Window.WindowFrame.FrameBoundary.OneOf_Boundary? + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
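+    // The copy itself happens in `_uniqueStorage()` below, which checks
+    // `isKnownUniquelyReferenced` and clones the storage before any mutation.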
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _boundary = source._boundary + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if _storage._boundary != nil {try decoder.handleConflictingOneOf()} + _storage._boundary = .currentRow(v) + } + }() + case 2: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if _storage._boundary != nil {try decoder.handleConflictingOneOf()} + _storage._boundary = .unbounded(v) + } + }() + case 3: try { + var v: Spark_Connect_Expression? + var hadOneofValue = false + if let current = _storage._boundary { + hadOneofValue = true + if case .value(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._boundary = .value(v) + } + }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch _storage._boundary { + case .currentRow?: try { + guard case .currentRow(let v)? = _storage._boundary else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 1) + }() + case .unbounded?: try { + guard case .unbounded(let v)? = _storage._boundary else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 2) + }() + case .value?: try { + guard case .value(let v)? 
= _storage._boundary else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case nil: break + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Window.WindowFrame.FrameBoundary, rhs: Spark_Connect_Expression.Window.WindowFrame.FrameBoundary) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._boundary != rhs_storage._boundary {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.SortOrder: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".SortOrder" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "child"), + 2: .same(proto: "direction"), + 3: .standard(proto: "null_ordering"), + ] + + fileprivate class _StorageClass { + var _child: Spark_Connect_Expression? = nil + var _direction: Spark_Connect_Expression.SortOrder.SortDirection = .unspecified + var _nullOrdering: Spark_Connect_Expression.SortOrder.NullOrdering = .sortNullsUnspecified + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _child = source._child + _direction = source._direction + _nullOrdering = source._nullOrdering + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._child) }() + case 2: try { try decoder.decodeSingularEnumField(value: &_storage._direction) }() + case 3: try { try decoder.decodeSingularEnumField(value: &_storage._nullOrdering) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._child { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._direction != .unspecified { + try visitor.visitSingularEnumField(value: _storage._direction, fieldNumber: 2) + } + if _storage._nullOrdering != .sortNullsUnspecified { + try visitor.visitSingularEnumField(value: _storage._nullOrdering, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.SortOrder, rhs: Spark_Connect_Expression.SortOrder) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._child != rhs_storage._child {return false} + if _storage._direction != rhs_storage._direction {return false} + if _storage._nullOrdering != rhs_storage._nullOrdering {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.SortOrder.SortDirection: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SORT_DIRECTION_UNSPECIFIED"), + 1: .same(proto: "SORT_DIRECTION_ASCENDING"), + 2: .same(proto: "SORT_DIRECTION_DESCENDING"), + ] +} + +extension Spark_Connect_Expression.SortOrder.NullOrdering: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SORT_NULLS_UNSPECIFIED"), + 1: .same(proto: "SORT_NULLS_FIRST"), + 2: .same(proto: "SORT_NULLS_LAST"), + ] +} + +extension Spark_Connect_Expression.Cast: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".Cast" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "expr"), + 2: .same(proto: "type"), + 3: .standard(proto: "type_str"), + 4: .standard(proto: "eval_mode"), + ] + + fileprivate class _StorageClass { + var _expr: Spark_Connect_Expression? = nil + var _castToType: Spark_Connect_Expression.Cast.OneOf_CastToType? + var _evalMode: Spark_Connect_Expression.Cast.EvalMode = .unspecified + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _expr = source._expr + _castToType = source._castToType + _evalMode = source._evalMode + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._expr) }() + case 2: try { + var v: Spark_Connect_DataType? + var hadOneofValue = false + if let current = _storage._castToType { + hadOneofValue = true + if case .type(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._castToType = .type(v) + } + }() + case 3: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if _storage._castToType != nil {try decoder.handleConflictingOneOf()} + _storage._castToType = .typeStr(v) + } + }() + case 4: try { try decoder.decodeSingularEnumField(value: &_storage._evalMode) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._expr { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + switch _storage._castToType { + case .type?: try { + guard case .type(let v)? = _storage._castToType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .typeStr?: try { + guard case .typeStr(let v)? 
= _storage._castToType else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + }() + case nil: break + } + if _storage._evalMode != .unspecified { + try visitor.visitSingularEnumField(value: _storage._evalMode, fieldNumber: 4) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Cast, rhs: Spark_Connect_Expression.Cast) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._expr != rhs_storage._expr {return false} + if _storage._castToType != rhs_storage._castToType {return false} + if _storage._evalMode != rhs_storage._evalMode {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Cast.EvalMode: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "EVAL_MODE_UNSPECIFIED"), + 1: .same(proto: "EVAL_MODE_LEGACY"), + 2: .same(proto: "EVAL_MODE_ANSI"), + 3: .same(proto: "EVAL_MODE_TRY"), + ] +} + +extension Spark_Connect_Expression.Literal: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".Literal" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "null"), + 2: .same(proto: "binary"), + 3: .same(proto: "boolean"), + 4: .same(proto: "byte"), + 5: .same(proto: "short"), + 6: .same(proto: "integer"), + 7: .same(proto: "long"), + 10: .same(proto: "float"), + 11: .same(proto: "double"), + 12: .same(proto: "decimal"), + 13: .same(proto: "string"), + 16: .same(proto: "date"), + 17: .same(proto: "timestamp"), + 18: .standard(proto: "timestamp_ntz"), + 19: .standard(proto: "calendar_interval"), + 20: .standard(proto: "year_month_interval"), + 21: .standard(proto: "day_time_interval"), + 22: .same(proto: "array"), + 23: .same(proto: "map"), + 24: .same(proto: "struct"), + 25: .standard(proto: "specialized_array"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_DataType? + var hadOneofValue = false + if let current = self.literalType { + hadOneofValue = true + if case .null(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.literalType = .null(v) + } + }() + case 2: try { + var v: Data? + try decoder.decodeSingularBytesField(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .binary(v) + } + }() + case 3: try { + var v: Bool? + try decoder.decodeSingularBoolField(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .boolean(v) + } + }() + case 4: try { + var v: Int32? 
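+        // Protobuf has no 8-bit scalar, so the `byte` literal (field 4) is carried
+        // as an int32 on the wire (as is `short` in field 5); the generated Swift
+        // case keeps the value as Int32.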
+ try decoder.decodeSingularInt32Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .byte(v) + } + }() + case 5: try { + var v: Int32? + try decoder.decodeSingularInt32Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .short(v) + } + }() + case 6: try { + var v: Int32? + try decoder.decodeSingularInt32Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .integer(v) + } + }() + case 7: try { + var v: Int64? + try decoder.decodeSingularInt64Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .long(v) + } + }() + case 10: try { + var v: Float? + try decoder.decodeSingularFloatField(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .float(v) + } + }() + case 11: try { + var v: Double? + try decoder.decodeSingularDoubleField(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .double(v) + } + }() + case 12: try { + var v: Spark_Connect_Expression.Literal.Decimal? + var hadOneofValue = false + if let current = self.literalType { + hadOneofValue = true + if case .decimal(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.literalType = .decimal(v) + } + }() + case 13: try { + var v: String? + try decoder.decodeSingularStringField(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .string(v) + } + }() + case 16: try { + var v: Int32? + try decoder.decodeSingularInt32Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .date(v) + } + }() + case 17: try { + var v: Int64? + try decoder.decodeSingularInt64Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .timestamp(v) + } + }() + case 18: try { + var v: Int64? + try decoder.decodeSingularInt64Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .timestampNtz(v) + } + }() + case 19: try { + var v: Spark_Connect_Expression.Literal.CalendarInterval? + var hadOneofValue = false + if let current = self.literalType { + hadOneofValue = true + if case .calendarInterval(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.literalType = .calendarInterval(v) + } + }() + case 20: try { + var v: Int32? + try decoder.decodeSingularInt32Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .yearMonthInterval(v) + } + }() + case 21: try { + var v: Int64? + try decoder.decodeSingularInt64Field(value: &v) + if let v = v { + if self.literalType != nil {try decoder.handleConflictingOneOf()} + self.literalType = .dayTimeInterval(v) + } + }() + case 22: try { + var v: Spark_Connect_Expression.Literal.Array? 
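+        // Seeding `v` with any payload already held by the oneof lets a duplicated
+        // `array` field merge into the existing message, matching proto3 semantics;
+        // `handleConflictingOneOf()` below gives stricter decoders a chance to
+        // reject the duplicate instead.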
+ var hadOneofValue = false + if let current = self.literalType { + hadOneofValue = true + if case .array(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.literalType = .array(v) + } + }() + case 23: try { + var v: Spark_Connect_Expression.Literal.Map? + var hadOneofValue = false + if let current = self.literalType { + hadOneofValue = true + if case .map(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.literalType = .map(v) + } + }() + case 24: try { + var v: Spark_Connect_Expression.Literal.Struct? + var hadOneofValue = false + if let current = self.literalType { + hadOneofValue = true + if case .struct(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.literalType = .struct(v) + } + }() + case 25: try { + var v: Spark_Connect_Expression.Literal.SpecializedArray? + var hadOneofValue = false + if let current = self.literalType { + hadOneofValue = true + if case .specializedArray(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.literalType = .specializedArray(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.literalType { + case .null?: try { + guard case .null(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .binary?: try { + guard case .binary(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularBytesField(value: v, fieldNumber: 2) + }() + case .boolean?: try { + guard case .boolean(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularBoolField(value: v, fieldNumber: 3) + }() + case .byte?: try { + guard case .byte(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt32Field(value: v, fieldNumber: 4) + }() + case .short?: try { + guard case .short(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt32Field(value: v, fieldNumber: 5) + }() + case .integer?: try { + guard case .integer(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt32Field(value: v, fieldNumber: 6) + }() + case .long?: try { + guard case .long(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt64Field(value: v, fieldNumber: 7) + }() + case .float?: try { + guard case .float(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularFloatField(value: v, fieldNumber: 10) + }() + case .double?: try { + guard case .double(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularDoubleField(value: v, fieldNumber: 11) + }() + case .decimal?: try { + guard case .decimal(let v)? 
= self.literalType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 12) + }() + case .string?: try { + guard case .string(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 13) + }() + case .date?: try { + guard case .date(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt32Field(value: v, fieldNumber: 16) + }() + case .timestamp?: try { + guard case .timestamp(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt64Field(value: v, fieldNumber: 17) + }() + case .timestampNtz?: try { + guard case .timestampNtz(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt64Field(value: v, fieldNumber: 18) + }() + case .calendarInterval?: try { + guard case .calendarInterval(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 19) + }() + case .yearMonthInterval?: try { + guard case .yearMonthInterval(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt32Field(value: v, fieldNumber: 20) + }() + case .dayTimeInterval?: try { + guard case .dayTimeInterval(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularInt64Field(value: v, fieldNumber: 21) + }() + case .array?: try { + guard case .array(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 22) + }() + case .map?: try { + guard case .map(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 23) + }() + case .struct?: try { + guard case .struct(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 24) + }() + case .specializedArray?: try { + guard case .specializedArray(let v)? = self.literalType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 25) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Literal, rhs: Spark_Connect_Expression.Literal) -> Bool { + if lhs.literalType != rhs.literalType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Literal.Decimal: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Literal.protoMessageName + ".Decimal" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "value"), + 2: .same(proto: "precision"), + 3: .same(proto: "scale"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.value) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self._precision) }() + case 3: try { try decoder.decodeSingularInt32Field(value: &self._scale) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.value.isEmpty { + try visitor.visitSingularStringField(value: self.value, fieldNumber: 1) + } + try { if let v = self._precision { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 2) + } }() + try { if let v = self._scale { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 3) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Literal.Decimal, rhs: Spark_Connect_Expression.Literal.Decimal) -> Bool { + if lhs.value != rhs.value {return false} + if lhs._precision != rhs._precision {return false} + if lhs._scale != rhs._scale {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Literal.CalendarInterval: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Literal.protoMessageName + ".CalendarInterval" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "months"), + 2: .same(proto: "days"), + 3: .same(proto: "microseconds"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.months) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self.days) }() + case 3: try { try decoder.decodeSingularInt64Field(value: &self.microseconds) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.months != 0 { + try visitor.visitSingularInt32Field(value: self.months, fieldNumber: 1) + } + if self.days != 0 { + try visitor.visitSingularInt32Field(value: self.days, fieldNumber: 2) + } + if self.microseconds != 0 { + try visitor.visitSingularInt64Field(value: self.microseconds, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Literal.CalendarInterval, rhs: Spark_Connect_Expression.Literal.CalendarInterval) -> Bool { + if lhs.months != rhs.months {return false} + if lhs.days != rhs.days {return false} + if lhs.microseconds != rhs.microseconds {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Literal.Array: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Literal.protoMessageName + ".Array" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "element_type"), + 2: .same(proto: "elements"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._elementType) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.elements) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._elementType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.elements.isEmpty { + try visitor.visitRepeatedMessageField(value: self.elements, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Literal.Array, rhs: Spark_Connect_Expression.Literal.Array) -> Bool { + if lhs._elementType != rhs._elementType {return false} + if lhs.elements != rhs.elements {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Literal.Map: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Literal.protoMessageName + ".Map" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "key_type"), + 2: .standard(proto: "value_type"), + 3: .same(proto: "keys"), + 4: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._keyType) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._valueType) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &self.keys) }() + case 4: try { try decoder.decodeRepeatedMessageField(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._keyType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = self._valueType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if !self.keys.isEmpty { + try visitor.visitRepeatedMessageField(value: self.keys, fieldNumber: 3) + } + if !self.values.isEmpty { + try visitor.visitRepeatedMessageField(value: self.values, fieldNumber: 4) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Literal.Map, rhs: Spark_Connect_Expression.Literal.Map) -> Bool { + if lhs._keyType != rhs._keyType {return false} + if lhs._valueType != rhs._valueType {return false} + if lhs.keys != rhs.keys {return false} + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Literal.Struct: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Literal.protoMessageName + ".Struct" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "struct_type"), + 2: .same(proto: "elements"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._structType) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.elements) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._structType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.elements.isEmpty { + try visitor.visitRepeatedMessageField(value: self.elements, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Literal.Struct, rhs: Spark_Connect_Expression.Literal.Struct) -> Bool { + if lhs._structType != rhs._structType {return false} + if lhs.elements != rhs.elements {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Literal.SpecializedArray: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.Literal.protoMessageName + ".SpecializedArray" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "bools"), + 2: .same(proto: "ints"), + 3: .same(proto: "longs"), + 4: .same(proto: "floats"), + 5: .same(proto: "doubles"), + 6: .same(proto: "strings"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_Bools? + var hadOneofValue = false + if let current = self.valueType { + hadOneofValue = true + if case .bools(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.valueType = .bools(v) + } + }() + case 2: try { + var v: Spark_Connect_Ints? + var hadOneofValue = false + if let current = self.valueType { + hadOneofValue = true + if case .ints(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.valueType = .ints(v) + } + }() + case 3: try { + var v: Spark_Connect_Longs? + var hadOneofValue = false + if let current = self.valueType { + hadOneofValue = true + if case .longs(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.valueType = .longs(v) + } + }() + case 4: try { + var v: Spark_Connect_Floats? + var hadOneofValue = false + if let current = self.valueType { + hadOneofValue = true + if case .floats(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.valueType = .floats(v) + } + }() + case 5: try { + var v: Spark_Connect_Doubles? + var hadOneofValue = false + if let current = self.valueType { + hadOneofValue = true + if case .doubles(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.valueType = .doubles(v) + } + }() + case 6: try { + var v: Spark_Connect_Strings? 
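+        // Spark_Connect_Strings (like the Bools/Ints/Longs/Floats/Doubles cases
+        // above) wraps a repeated primitive payload, so a homogeneous array literal
+        // can travel as one wrapper message rather than one Literal per element.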
+ var hadOneofValue = false + if let current = self.valueType { + hadOneofValue = true + if case .strings(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.valueType = .strings(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.valueType { + case .bools?: try { + guard case .bools(let v)? = self.valueType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .ints?: try { + guard case .ints(let v)? = self.valueType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .longs?: try { + guard case .longs(let v)? = self.valueType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .floats?: try { + guard case .floats(let v)? = self.valueType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .doubles?: try { + guard case .doubles(let v)? = self.valueType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .strings?: try { + guard case .strings(let v)? = self.valueType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Literal.SpecializedArray, rhs: Spark_Connect_Expression.Literal.SpecializedArray) -> Bool { + if lhs.valueType != rhs.valueType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.UnresolvedAttribute: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".UnresolvedAttribute" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "unparsed_identifier"), + 2: .standard(proto: "plan_id"), + 3: .standard(proto: "is_metadata_column"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.unparsedIdentifier) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self._planID) }() + case 3: try { try decoder.decodeSingularBoolField(value: &self._isMetadataColumn) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.unparsedIdentifier.isEmpty { + try visitor.visitSingularStringField(value: self.unparsedIdentifier, fieldNumber: 1) + } + try { if let v = self._planID { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 2) + } }() + try { if let v = self._isMetadataColumn { + try visitor.visitSingularBoolField(value: v, fieldNumber: 3) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.UnresolvedAttribute, rhs: Spark_Connect_Expression.UnresolvedAttribute) -> Bool { + if lhs.unparsedIdentifier != rhs.unparsedIdentifier {return false} + if lhs._planID != rhs._planID {return false} + if lhs._isMetadataColumn != rhs._isMetadataColumn {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.UnresolvedFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".UnresolvedFunction" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "function_name"), + 2: .same(proto: "arguments"), + 3: .standard(proto: "is_distinct"), + 4: .standard(proto: "is_user_defined_function"), + 5: .standard(proto: "is_internal"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.functionName) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.arguments) }() + case 3: try { try decoder.decodeSingularBoolField(value: &self.isDistinct) }() + case 4: try { try decoder.decodeSingularBoolField(value: &self.isUserDefinedFunction) }() + case 5: try { try decoder.decodeSingularBoolField(value: &self._isInternal) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.functionName.isEmpty { + try visitor.visitSingularStringField(value: self.functionName, fieldNumber: 1) + } + if !self.arguments.isEmpty { + try visitor.visitRepeatedMessageField(value: self.arguments, fieldNumber: 2) + } + if self.isDistinct != false { + try visitor.visitSingularBoolField(value: self.isDistinct, fieldNumber: 3) + } + if self.isUserDefinedFunction != false { + try visitor.visitSingularBoolField(value: self.isUserDefinedFunction, fieldNumber: 4) + } + try { if let v = self._isInternal { + try visitor.visitSingularBoolField(value: v, fieldNumber: 5) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.UnresolvedFunction, rhs: Spark_Connect_Expression.UnresolvedFunction) -> Bool { + if lhs.functionName != rhs.functionName {return false} + if lhs.arguments != rhs.arguments {return false} + if lhs.isDistinct != rhs.isDistinct {return false} + if lhs.isUserDefinedFunction != rhs.isUserDefinedFunction {return false} + if lhs._isInternal != rhs._isInternal {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.ExpressionString: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".ExpressionString" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "expression"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.expression) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.expression.isEmpty { + try visitor.visitSingularStringField(value: self.expression, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.ExpressionString, rhs: Spark_Connect_Expression.ExpressionString) -> Bool { + if lhs.expression != rhs.expression {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.UnresolvedStar: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".UnresolvedStar" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "unparsed_target"), + 2: .standard(proto: "plan_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._unparsedTarget) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self._planID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._unparsedTarget { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._planID { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.UnresolvedStar, rhs: Spark_Connect_Expression.UnresolvedStar) -> Bool { + if lhs._unparsedTarget != rhs._unparsedTarget {return false} + if lhs._planID != rhs._planID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.UnresolvedRegex: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".UnresolvedRegex" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "col_name"), + 2: .standard(proto: "plan_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.colName) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self._planID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.colName.isEmpty { + try visitor.visitSingularStringField(value: self.colName, fieldNumber: 1) + } + try { if let v = self._planID { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.UnresolvedRegex, rhs: Spark_Connect_Expression.UnresolvedRegex) -> Bool { + if lhs.colName != rhs.colName {return false} + if lhs._planID != rhs._planID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.UnresolvedExtractValue: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".UnresolvedExtractValue" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "child"), + 2: .same(proto: "extraction"), + ] + + fileprivate class _StorageClass { + var _child: Spark_Connect_Expression? = nil + var _extraction: Spark_Connect_Expression? 
= nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _child = source._child + _extraction = source._extraction + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._child) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._extraction) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._child { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._extraction { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.UnresolvedExtractValue, rhs: Spark_Connect_Expression.UnresolvedExtractValue) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._child != rhs_storage._child {return false} + if _storage._extraction != rhs_storage._extraction {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.UpdateFields: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".UpdateFields" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "struct_expression"), + 2: .standard(proto: "field_name"), + 3: .standard(proto: "value_expression"), + ] + + fileprivate class _StorageClass { + var _structExpression: Spark_Connect_Expression? = nil + var _fieldName: String = String() + var _valueExpression: Spark_Connect_Expression? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. 
+ // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _structExpression = source._structExpression + _fieldName = source._fieldName + _valueExpression = source._valueExpression + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._structExpression) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._fieldName) }() + case 3: try { try decoder.decodeSingularMessageField(value: &_storage._valueExpression) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._structExpression { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._fieldName.isEmpty { + try visitor.visitSingularStringField(value: _storage._fieldName, fieldNumber: 2) + } + try { if let v = _storage._valueExpression { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.UpdateFields, rhs: Spark_Connect_Expression.UpdateFields) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._structExpression != rhs_storage._structExpression {return false} + if _storage._fieldName != rhs_storage._fieldName {return false} + if _storage._valueExpression != rhs_storage._valueExpression {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.Alias: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".Alias" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "expr"), + 2: .same(proto: "name"), + 3: .same(proto: "metadata"), + ] + + fileprivate class _StorageClass { + var _expr: Spark_Connect_Expression? = nil + var _name: [String] = [] + var _metadata: String? 
= nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _expr = source._expr + _name = source._name + _metadata = source._metadata + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._expr) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._name) }() + case 3: try { try decoder.decodeSingularStringField(value: &_storage._metadata) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._expr { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._name.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._name, fieldNumber: 2) + } + try { if let v = _storage._metadata { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.Alias, rhs: Spark_Connect_Expression.Alias) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._expr != rhs_storage._expr {return false} + if _storage._name != rhs_storage._name {return false} + if _storage._metadata != rhs_storage._metadata {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.LambdaFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".LambdaFunction" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "function"), + 2: .same(proto: "arguments"), + ] + + fileprivate class _StorageClass { + var _function: Spark_Connect_Expression? 
= nil + var _arguments: [Spark_Connect_Expression.UnresolvedNamedLambdaVariable] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _function = source._function + _arguments = source._arguments + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._function) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._arguments) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._function { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._arguments.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._arguments, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.LambdaFunction, rhs: Spark_Connect_Expression.LambdaFunction) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._function != rhs_storage._function {return false} + if _storage._arguments != rhs_storage._arguments {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Expression.UnresolvedNamedLambdaVariable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Expression.protoMessageName + ".UnresolvedNamedLambdaVariable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "name_parts"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedStringField(value: &self.nameParts) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.nameParts.isEmpty { + try visitor.visitRepeatedStringField(value: self.nameParts, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Expression.UnresolvedNamedLambdaVariable, rhs: Spark_Connect_Expression.UnresolvedNamedLambdaVariable) -> Bool { + if lhs.nameParts != rhs.nameParts {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ExpressionCommon: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ExpressionCommon" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "origin"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._origin) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._origin { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ExpressionCommon, rhs: Spark_Connect_ExpressionCommon) -> Bool { + if lhs._origin != rhs._origin {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CommonInlineUserDefinedFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CommonInlineUserDefinedFunction" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "function_name"), + 2: .same(proto: "deterministic"), + 3: .same(proto: "arguments"), + 4: .standard(proto: "python_udf"), + 5: .standard(proto: "scalar_scala_udf"), + 6: .standard(proto: "java_udf"), + 7: .standard(proto: "is_distinct"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.functionName) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self.deterministic) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &self.arguments) }() + case 4: try { + var v: Spark_Connect_PythonUDF? 
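+      // The UDF body is a oneof across python_udf (field 4), scalar_scala_udf
+      // (field 5), and java_udf (field 6); only the variant actually present on
+      // the wire is decoded here.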
+ var hadOneofValue = false + if let current = self.function { + hadOneofValue = true + if case .pythonUdf(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.function = .pythonUdf(v) + } + }() + case 5: try { + var v: Spark_Connect_ScalarScalaUDF? + var hadOneofValue = false + if let current = self.function { + hadOneofValue = true + if case .scalarScalaUdf(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.function = .scalarScalaUdf(v) + } + }() + case 6: try { + var v: Spark_Connect_JavaUDF? + var hadOneofValue = false + if let current = self.function { + hadOneofValue = true + if case .javaUdf(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.function = .javaUdf(v) + } + }() + case 7: try { try decoder.decodeSingularBoolField(value: &self.isDistinct) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.functionName.isEmpty { + try visitor.visitSingularStringField(value: self.functionName, fieldNumber: 1) + } + if self.deterministic != false { + try visitor.visitSingularBoolField(value: self.deterministic, fieldNumber: 2) + } + if !self.arguments.isEmpty { + try visitor.visitRepeatedMessageField(value: self.arguments, fieldNumber: 3) + } + switch self.function { + case .pythonUdf?: try { + guard case .pythonUdf(let v)? = self.function else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .scalarScalaUdf?: try { + guard case .scalarScalaUdf(let v)? = self.function else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .javaUdf?: try { + guard case .javaUdf(let v)? 
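
The three UDF cases above follow SwiftProtobuf's standard oneof pattern: decoding a second oneof field routes the first value through `handleConflictingOneOf()`, setting any case-specific accessor rewrites the whole `function` oneof, and reading a non-active case returns a default instance rather than trapping. A minimal sketch, assuming the usual generated convenience accessors that accompany this oneof earlier in the file (the function name and version string are illustrative only):

var udf = Spark_Connect_CommonInlineUserDefinedFunction()
udf.functionName = "strlen"              // illustrative UDF name
udf.deterministic = true

var py = Spark_Connect_PythonUDF()
py.pythonVer = "3.11"                    // illustrative Python version
udf.pythonUdf = py                       // selects the .pythonUdf case of `function`

// Reading a non-active case never traps; it returns a default instance.
assert(udf.javaUdf.className.isEmpty)    // `function` is still .pythonUdf

if case .pythonUdf(let active)? = udf.function {
  assert(active.pythonVer == "3.11")     // the active case round-trips intact
}
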
+
+extension Spark_Connect_PythonUDF: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".PythonUDF"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "output_type"),
+    2: .standard(proto: "eval_type"),
+    3: .same(proto: "command"),
+    4: .standard(proto: "python_ver"),
+    5: .standard(proto: "additional_includes"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._outputType) }()
+      case 2: try { try decoder.decodeSingularInt32Field(value: &self.evalType) }()
+      case 3: try { try decoder.decodeSingularBytesField(value: &self.command) }()
+      case 4: try { try decoder.decodeSingularStringField(value: &self.pythonVer) }()
+      case 5: try { try decoder.decodeRepeatedStringField(value: &self.additionalIncludes) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._outputType {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    if self.evalType != 0 {
+      try visitor.visitSingularInt32Field(value: self.evalType, fieldNumber: 2)
+    }
+    if !self.command.isEmpty {
+      try visitor.visitSingularBytesField(value: self.command, fieldNumber: 3)
+    }
+    if !self.pythonVer.isEmpty {
+      try visitor.visitSingularStringField(value: self.pythonVer, fieldNumber: 4)
+    }
+    if !self.additionalIncludes.isEmpty {
+      try visitor.visitRepeatedStringField(value: self.additionalIncludes, fieldNumber: 5)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_PythonUDF, rhs: Spark_Connect_PythonUDF) -> Bool {
+    if lhs._outputType != rhs._outputType {return false}
+    if lhs.evalType != rhs.evalType {return false}
+    if lhs.command != rhs.command {return false}
+    if lhs.pythonVer != rhs.pythonVer {return false}
+    if lhs.additionalIncludes != rhs.additionalIncludes {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_ScalarScalaUDF: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".ScalarScalaUDF"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "payload"),
+    2: .same(proto: "inputTypes"),
+    3: .same(proto: "outputType"),
+    4: .same(proto: "nullable"),
+    5: .same(proto: "aggregate"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularBytesField(value: &self.payload) }()
+      case 2: try { try decoder.decodeRepeatedMessageField(value: &self.inputTypes) }()
+      case 3: try { try decoder.decodeSingularMessageField(value: &self._outputType) }()
+      case 4: try { try decoder.decodeSingularBoolField(value: &self.nullable) }()
+      case 5: try { try decoder.decodeSingularBoolField(value: &self.aggregate) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if !self.payload.isEmpty {
+      try visitor.visitSingularBytesField(value: self.payload, fieldNumber: 1)
+    }
+    if !self.inputTypes.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.inputTypes, fieldNumber: 2)
+    }
+    try { if let v = self._outputType {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+    } }()
+    if self.nullable != false {
+      try visitor.visitSingularBoolField(value: self.nullable, fieldNumber: 4)
+    }
+    if self.aggregate != false {
+      try visitor.visitSingularBoolField(value: self.aggregate, fieldNumber: 5)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_ScalarScalaUDF, rhs: Spark_Connect_ScalarScalaUDF) -> Bool {
+    if lhs.payload != rhs.payload {return false}
+    if lhs.inputTypes != rhs.inputTypes {return false}
+    if lhs._outputType != rhs._outputType {return false}
+    if lhs.nullable != rhs.nullable {return false}
+    if lhs.aggregate != rhs.aggregate {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_JavaUDF: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".JavaUDF"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "class_name"),
+    2: .standard(proto: "output_type"),
+    3: .same(proto: "aggregate"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.className) }()
+      case 2: try { try decoder.decodeSingularMessageField(value: &self._outputType) }()
+      case 3: try { try decoder.decodeSingularBoolField(value: &self.aggregate) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if !self.className.isEmpty {
+      try visitor.visitSingularStringField(value: self.className, fieldNumber: 1)
+    }
+    try { if let v = self._outputType {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    } }()
+    if self.aggregate != false {
+      try visitor.visitSingularBoolField(value: self.aggregate, fieldNumber: 3)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_JavaUDF, rhs: Spark_Connect_JavaUDF) -> Bool {
+    if lhs.className != rhs.className {return false}
+    if lhs._outputType != rhs._outputType {return false}
+    if lhs.aggregate != rhs.aggregate {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_TypedAggregateExpression: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".TypedAggregateExpression"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "scalar_scala_udf"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._scalarScalaUdf) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._scalarScalaUdf {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_TypedAggregateExpression, rhs: Spark_Connect_TypedAggregateExpression) -> Bool {
+    if lhs._scalarScalaUdf != rhs._scalarScalaUdf {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_CallFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".CallFunction"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "function_name"),
+    2: .same(proto: "arguments"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.functionName) }()
+      case 2: try { try decoder.decodeRepeatedMessageField(value: &self.arguments) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.functionName.isEmpty {
+      try visitor.visitSingularStringField(value: self.functionName, fieldNumber: 1)
+    }
+    if !self.arguments.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.arguments, fieldNumber: 2)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_CallFunction, rhs: Spark_Connect_CallFunction) -> Bool {
+    if lhs.functionName != rhs.functionName {return false}
+    if lhs.arguments != rhs.arguments {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_NamedArgumentExpression: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".NamedArgumentExpression"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "key"),
+    2: .same(proto: "value"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _key: String = String()
+    var _value: Spark_Connect_Expression? = nil
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _key = source._key
+      _value = source._value
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularStringField(value: &_storage._key) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._value) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      if !_storage._key.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._key, fieldNumber: 1)
+      }
+      try { if let v = _storage._value {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_NamedArgumentExpression, rhs: Spark_Connect_NamedArgumentExpression) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._key != rhs_storage._key {return false}
+        if _storage._value != rhs_storage._value {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MergeAction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".MergeAction"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "action_type"),
+    2: .same(proto: "condition"),
+    3: .same(proto: "assignments"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _actionType: Spark_Connect_MergeAction.ActionType = .invalid
+    var _condition: Spark_Connect_Expression? = nil
+    var _assignments: [Spark_Connect_MergeAction.Assignment] = []
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _actionType = source._actionType
+      _condition = source._condition
+      _assignments = source._assignments
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularEnumField(value: &_storage._actionType) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._condition) }()
+        case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._assignments) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      if _storage._actionType != .invalid {
+        try visitor.visitSingularEnumField(value: _storage._actionType, fieldNumber: 1)
+      }
+      try { if let v = _storage._condition {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+      } }()
+      if !_storage._assignments.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._assignments, fieldNumber: 3)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MergeAction, rhs: Spark_Connect_MergeAction) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._actionType != rhs_storage._actionType {return false}
+        if _storage._condition != rhs_storage._condition {return false}
+        if _storage._assignments != rhs_storage._assignments {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MergeAction.ActionType: SwiftProtobuf._ProtoNameProviding {
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    0: .same(proto: "ACTION_TYPE_INVALID"),
+    1: .same(proto: "ACTION_TYPE_DELETE"),
+    2: .same(proto: "ACTION_TYPE_INSERT"),
+    3: .same(proto: "ACTION_TYPE_INSERT_STAR"),
+    4: .same(proto: "ACTION_TYPE_UPDATE"),
+    5: .same(proto: "ACTION_TYPE_UPDATE_STAR"),
+  ]
+}
+
+extension Spark_Connect_MergeAction.Assignment: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_MergeAction.protoMessageName + ".Assignment"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "key"),
+    2: .same(proto: "value"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._key) }()
+      case 2: try { try decoder.decodeSingularMessageField(value: &self._value) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._key {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    try { if let v = self._value {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MergeAction.Assignment, rhs: Spark_Connect_MergeAction.Assignment) -> Bool {
+    if lhs._key != rhs._key {return false}
+    if lhs._value != rhs._value {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_SubqueryExpression: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".SubqueryExpression"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "plan_id"),
+    2: .standard(proto: "subquery_type"),
+    3: .standard(proto: "table_arg_options"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularInt64Field(value: &self.planID) }()
+      case 2: try { try decoder.decodeSingularEnumField(value: &self.subqueryType) }()
+      case 3: try { try decoder.decodeSingularMessageField(value: &self._tableArgOptions) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if self.planID != 0 {
+      try visitor.visitSingularInt64Field(value: self.planID, fieldNumber: 1)
+    }
+    if self.subqueryType != .unknown {
+      try visitor.visitSingularEnumField(value: self.subqueryType, fieldNumber: 2)
+    }
+    try { if let v = self._tableArgOptions {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_SubqueryExpression, rhs: Spark_Connect_SubqueryExpression) -> Bool {
+    if lhs.planID != rhs.planID {return false}
+    if lhs.subqueryType != rhs.subqueryType {return false}
+    if lhs._tableArgOptions != rhs._tableArgOptions {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_SubqueryExpression.SubqueryType: SwiftProtobuf._ProtoNameProviding {
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    0: .same(proto: "SUBQUERY_TYPE_UNKNOWN"),
+    1: .same(proto: "SUBQUERY_TYPE_SCALAR"),
+    2: .same(proto: "SUBQUERY_TYPE_EXISTS"),
+    3: .same(proto: "SUBQUERY_TYPE_TABLE_ARG"),
+  ]
+}
+
+extension Spark_Connect_SubqueryExpression.TableArgOptions: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_SubqueryExpression.protoMessageName + ".TableArgOptions"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "partition_spec"),
+    2: .standard(proto: "order_spec"),
+    3: .standard(proto: "with_single_partition"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeRepeatedMessageField(value: &self.partitionSpec) }()
+      case 2: try { try decoder.decodeRepeatedMessageField(value: &self.orderSpec) }()
+      case 3: try { try decoder.decodeSingularBoolField(value: &self._withSinglePartition) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if !self.partitionSpec.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.partitionSpec, fieldNumber: 1)
+    }
+    if !self.orderSpec.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.orderSpec, fieldNumber: 2)
+    }
+    try { if let v = self._withSinglePartition {
+      try visitor.visitSingularBoolField(value: v, fieldNumber: 3)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_SubqueryExpression.TableArgOptions, rhs: Spark_Connect_SubqueryExpression.TableArgOptions) -> Bool {
+    if lhs.partitionSpec != rhs.partitionSpec {return false}
+    if lhs.orderSpec != rhs.orderSpec {return false}
+    if lhs._withSinglePartition != rhs._withSinglePartition {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
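
That closes out the generated runtime support for spark/connect/expressions.proto: every expression message can now round-trip the protobuf wire format. A minimal sketch using the standard SwiftProtobuf 1.x API from a throwing context (the SQL function name and the empty placeholder argument are illustrative only):

import SwiftProtobuf

var call = Spark_Connect_CallFunction()
call.functionName = "lower"                    // illustrative function name
call.arguments = [Spark_Connect_Expression()]  // placeholder argument

let bytes = try call.serializedData()          // encoded via the traverse() above
let decoded = try Spark_Connect_CallFunction(serializedData: bytes)
assert(decoded == call)                        // generated ==, including unknownFields
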
diff --git a/Sources/SparkConnect/ml.grpc.swift b/Sources/SparkConnect/ml.grpc.swift
new file mode 100644
index 0000000..75c8e0d
--- /dev/null
+++ b/Sources/SparkConnect/ml.grpc.swift
@@ -0,0 +1,26 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// DO NOT EDIT.
+// swift-format-ignore-file
+//
+// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
+// Source: spark/connect/ml.proto
+//
+// For information on using the generated types, please see the documentation:
+// https://github.com/grpc/grpc-swift
+
+// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/ml.pb.swift b/Sources/SparkConnect/ml.pb.swift
new file mode 100644
index 0000000..6fd4b8a
--- /dev/null
+++ b/Sources/SparkConnect/ml.pb.swift
@@ -0,0 +1,1004 @@
+// DO NOT EDIT.
+// swift-format-ignore-file
+// swiftlint:disable all
+//
+// Generated by the Swift generator plugin for the protocol buffer compiler.
+// Source: spark/connect/ml.proto
+//
+// For information on using the generated types, please see the documentation:
+// https://github.com/apple/swift-protobuf/
+
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +/// Command for ML +struct Spark_Connect_MlCommand: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var command: Spark_Connect_MlCommand.OneOf_Command? = nil + + var fit: Spark_Connect_MlCommand.Fit { + get { + if case .fit(let v)? = command {return v} + return Spark_Connect_MlCommand.Fit() + } + set {command = .fit(newValue)} + } + + var fetch: Spark_Connect_Fetch { + get { + if case .fetch(let v)? = command {return v} + return Spark_Connect_Fetch() + } + set {command = .fetch(newValue)} + } + + var delete: Spark_Connect_MlCommand.Delete { + get { + if case .delete(let v)? = command {return v} + return Spark_Connect_MlCommand.Delete() + } + set {command = .delete(newValue)} + } + + var write: Spark_Connect_MlCommand.Write { + get { + if case .write(let v)? = command {return v} + return Spark_Connect_MlCommand.Write() + } + set {command = .write(newValue)} + } + + var read: Spark_Connect_MlCommand.Read { + get { + if case .read(let v)? = command {return v} + return Spark_Connect_MlCommand.Read() + } + set {command = .read(newValue)} + } + + var evaluate: Spark_Connect_MlCommand.Evaluate { + get { + if case .evaluate(let v)? = command {return v} + return Spark_Connect_MlCommand.Evaluate() + } + set {command = .evaluate(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_Command: Equatable, Sendable { + case fit(Spark_Connect_MlCommand.Fit) + case fetch(Spark_Connect_Fetch) + case delete(Spark_Connect_MlCommand.Delete) + case write(Spark_Connect_MlCommand.Write) + case read(Spark_Connect_MlCommand.Read) + case evaluate(Spark_Connect_MlCommand.Evaluate) + + } + + /// Command for estimator.fit(dataset) + struct Fit: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Estimator information (its type should be OPERATOR_TYPE_ESTIMATOR) + var estimator: Spark_Connect_MlOperator { + get {return _estimator ?? Spark_Connect_MlOperator()} + set {_estimator = newValue} + } + /// Returns true if `estimator` has been explicitly set. + var hasEstimator: Bool {return self._estimator != nil} + /// Clears the value of `estimator`. Subsequent reads from it will return its default value. + mutating func clearEstimator() {self._estimator = nil} + + /// (Optional) parameters of the Estimator + var params: Spark_Connect_MlParams { + get {return _params ?? Spark_Connect_MlParams()} + set {_params = newValue} + } + /// Returns true if `params` has been explicitly set. + var hasParams: Bool {return self._params != nil} + /// Clears the value of `params`. Subsequent reads from it will return its default value. 
+ mutating func clearParams() {self._params = nil} + + /// (Required) the training dataset + var dataset: Spark_Connect_Relation { + get {return _dataset ?? Spark_Connect_Relation()} + set {_dataset = newValue} + } + /// Returns true if `dataset` has been explicitly set. + var hasDataset: Bool {return self._dataset != nil} + /// Clears the value of `dataset`. Subsequent reads from it will return its default value. + mutating func clearDataset() {self._dataset = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _estimator: Spark_Connect_MlOperator? = nil + fileprivate var _params: Spark_Connect_MlParams? = nil + fileprivate var _dataset: Spark_Connect_Relation? = nil + } + + /// Command to delete the cached object which could be a model + /// or summary evaluated by a model + struct Delete: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var objRef: Spark_Connect_ObjectRef { + get {return _objRef ?? Spark_Connect_ObjectRef()} + set {_objRef = newValue} + } + /// Returns true if `objRef` has been explicitly set. + var hasObjRef: Bool {return self._objRef != nil} + /// Clears the value of `objRef`. Subsequent reads from it will return its default value. + mutating func clearObjRef() {self._objRef = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _objRef: Spark_Connect_ObjectRef? = nil + } + + /// Command to write ML operator + struct Write: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// It could be an estimator/evaluator or the cached model + var type: Spark_Connect_MlCommand.Write.OneOf_Type? = nil + + /// Estimator or evaluator + var `operator`: Spark_Connect_MlOperator { + get { + if case .operator(let v)? = type {return v} + return Spark_Connect_MlOperator() + } + set {type = .operator(newValue)} + } + + /// The cached model + var objRef: Spark_Connect_ObjectRef { + get { + if case .objRef(let v)? = type {return v} + return Spark_Connect_ObjectRef() + } + set {type = .objRef(newValue)} + } + + /// (Optional) The parameters of operator which could be estimator/evaluator or a cached model + var params: Spark_Connect_MlParams { + get {return _params ?? Spark_Connect_MlParams()} + set {_params = newValue} + } + /// Returns true if `params` has been explicitly set. + var hasParams: Bool {return self._params != nil} + /// Clears the value of `params`. Subsequent reads from it will return its default value. + mutating func clearParams() {self._params = nil} + + /// (Required) Save the ML instance to the path + var path: String = String() + + /// (Optional) Overwrites if the output path already exists. + var shouldOverwrite: Bool { + get {return _shouldOverwrite ?? false} + set {_shouldOverwrite = newValue} + } + /// Returns true if `shouldOverwrite` has been explicitly set. + var hasShouldOverwrite: Bool {return self._shouldOverwrite != nil} + /// Clears the value of `shouldOverwrite`. Subsequent reads from it will return its default value. 
+    mutating func clearShouldOverwrite() {self._shouldOverwrite = nil}
+
+    /// (Optional) The options of the writer
+    var options: Dictionary<String,String> = [:]
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    /// It could be an estimator/evaluator or the cached model
+    enum OneOf_Type: Equatable, Sendable {
+      /// Estimator or evaluator
+      case `operator`(Spark_Connect_MlOperator)
+      /// The cached model
+      case objRef(Spark_Connect_ObjectRef)
+
+    }
+
+    init() {}
+
+    fileprivate var _params: Spark_Connect_MlParams? = nil
+    fileprivate var _shouldOverwrite: Bool? = nil
+  }
+
+  /// Command to load ML operator.
+  struct Read: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) ML operator information
+    var `operator`: Spark_Connect_MlOperator {
+      get {return _operator ?? Spark_Connect_MlOperator()}
+      set {_operator = newValue}
+    }
+    /// Returns true if ``operator`` has been explicitly set.
+    var hasOperator: Bool {return self._operator != nil}
+    /// Clears the value of ``operator``. Subsequent reads from it will return its default value.
+    mutating func clearOperator() {self._operator = nil}
+
+    /// (Required) Load the ML instance from the input path
+    var path: String = String()
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _operator: Spark_Connect_MlOperator? = nil
+  }
+
+  /// Command for evaluator.evaluate(dataset)
+  struct Evaluate: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    /// (Required) Evaluator information (its type should be OPERATOR_TYPE_EVALUATOR)
+    var evaluator: Spark_Connect_MlOperator {
+      get {return _evaluator ?? Spark_Connect_MlOperator()}
+      set {_evaluator = newValue}
+    }
+    /// Returns true if `evaluator` has been explicitly set.
+    var hasEvaluator: Bool {return self._evaluator != nil}
+    /// Clears the value of `evaluator`. Subsequent reads from it will return its default value.
+    mutating func clearEvaluator() {self._evaluator = nil}
+
+    /// (Optional) parameters of the Evaluator
+    var params: Spark_Connect_MlParams {
+      get {return _params ?? Spark_Connect_MlParams()}
+      set {_params = newValue}
+    }
+    /// Returns true if `params` has been explicitly set.
+    var hasParams: Bool {return self._params != nil}
+    /// Clears the value of `params`. Subsequent reads from it will return its default value.
+    mutating func clearParams() {self._params = nil}
+
+    /// (Required) the evaluating dataset
+    var dataset: Spark_Connect_Relation {
+      get {return _dataset ?? Spark_Connect_Relation()}
+      set {_dataset = newValue}
+    }
+    /// Returns true if `dataset` has been explicitly set.
+    var hasDataset: Bool {return self._dataset != nil}
+    /// Clears the value of `dataset`. Subsequent reads from it will return its default value.
+    mutating func clearDataset() {self._dataset = nil}
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+
+    fileprivate var _evaluator: Spark_Connect_MlOperator? = nil
+    fileprivate var _params: Spark_Connect_MlParams? = nil
+    fileprivate var _dataset: Spark_Connect_Relation? = nil
+  }
+
+  init() {}
+}
+
+/// The result of MlCommand
+struct Spark_Connect_MlCommandResult: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var resultType: Spark_Connect_MlCommandResult.OneOf_ResultType? = nil + + /// The result of the attribute + var param: Spark_Connect_Expression.Literal { + get { + if case .param(let v)? = resultType {return v} + return Spark_Connect_Expression.Literal() + } + set {resultType = .param(newValue)} + } + + /// Evaluate a Dataset in a model and return the cached ID of summary + var summary: String { + get { + if case .summary(let v)? = resultType {return v} + return String() + } + set {resultType = .summary(newValue)} + } + + /// Operator information + var operatorInfo: Spark_Connect_MlCommandResult.MlOperatorInfo { + get { + if case .operatorInfo(let v)? = resultType {return v} + return Spark_Connect_MlCommandResult.MlOperatorInfo() + } + set {resultType = .operatorInfo(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_ResultType: Equatable, Sendable { + /// The result of the attribute + case param(Spark_Connect_Expression.Literal) + /// Evaluate a Dataset in a model and return the cached ID of summary + case summary(String) + /// Operator information + case operatorInfo(Spark_Connect_MlCommandResult.MlOperatorInfo) + + } + + /// Represents an operator info + struct MlOperatorInfo: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var type: Spark_Connect_MlCommandResult.MlOperatorInfo.OneOf_Type? = nil + + /// The cached object which could be a model or summary evaluated by a model + var objRef: Spark_Connect_ObjectRef { + get { + if case .objRef(let v)? = type {return v} + return Spark_Connect_ObjectRef() + } + set {type = .objRef(newValue)} + } + + /// Operator name + var name: String { + get { + if case .name(let v)? = type {return v} + return String() + } + set {type = .name(newValue)} + } + + /// (Optional) the 'uid' of a ML object + /// Note it is different from the 'id' of a cached object. + var uid: String { + get {return _uid ?? String()} + set {_uid = newValue} + } + /// Returns true if `uid` has been explicitly set. + var hasUid: Bool {return self._uid != nil} + /// Clears the value of `uid`. Subsequent reads from it will return its default value. + mutating func clearUid() {self._uid = nil} + + /// (Optional) parameters + var params: Spark_Connect_MlParams { + get {return _params ?? Spark_Connect_MlParams()} + set {_params = newValue} + } + /// Returns true if `params` has been explicitly set. + var hasParams: Bool {return self._params != nil} + /// Clears the value of `params`. Subsequent reads from it will return its default value. + mutating func clearParams() {self._params = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_Type: Equatable, Sendable { + /// The cached object which could be a model or summary evaluated by a model + case objRef(Spark_Connect_ObjectRef) + /// Operator name + case name(String) + + } + + init() {} + + fileprivate var _uid: String? = nil + fileprivate var _params: Spark_Connect_MlParams? = nil + } + + init() {} +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. 
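
Before the runtime-support extensions, a short sketch of how the message surface defined above composes; it relies only on accessors generated in this file, and the save path and overwrite flag are illustrative values:

var write = Spark_Connect_MlCommand.Write()
write.path = "/tmp/model"                  // illustrative save path
write.shouldOverwrite = true               // explicit presence: hasShouldOverwrite is now true
write.objRef = Spark_Connect_ObjectRef()   // selects the .objRef case of the `type` oneof

var command = Spark_Connect_MlCommand()
command.write = write                      // selects the .write case of the `command` oneof

if case .write(let w)? = command.command {
  assert(w.hasShouldOverwrite && w.shouldOverwrite)
}
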
+
+fileprivate let _protobuf_package = "spark.connect"
+
+extension Spark_Connect_MlCommand: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".MlCommand"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "fit"),
+    2: .same(proto: "fetch"),
+    3: .same(proto: "delete"),
+    4: .same(proto: "write"),
+    5: .same(proto: "read"),
+    6: .same(proto: "evaluate"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try {
+        var v: Spark_Connect_MlCommand.Fit?
+        var hadOneofValue = false
+        if let current = self.command {
+          hadOneofValue = true
+          if case .fit(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.command = .fit(v)
+        }
+      }()
+      case 2: try {
+        var v: Spark_Connect_Fetch?
+        var hadOneofValue = false
+        if let current = self.command {
+          hadOneofValue = true
+          if case .fetch(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.command = .fetch(v)
+        }
+      }()
+      case 3: try {
+        var v: Spark_Connect_MlCommand.Delete?
+        var hadOneofValue = false
+        if let current = self.command {
+          hadOneofValue = true
+          if case .delete(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.command = .delete(v)
+        }
+      }()
+      case 4: try {
+        var v: Spark_Connect_MlCommand.Write?
+        var hadOneofValue = false
+        if let current = self.command {
+          hadOneofValue = true
+          if case .write(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.command = .write(v)
+        }
+      }()
+      case 5: try {
+        var v: Spark_Connect_MlCommand.Read?
+        var hadOneofValue = false
+        if let current = self.command {
+          hadOneofValue = true
+          if case .read(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.command = .read(v)
+        }
+      }()
+      case 6: try {
+        var v: Spark_Connect_MlCommand.Evaluate?
+        var hadOneofValue = false
+        if let current = self.command {
+          hadOneofValue = true
+          if case .evaluate(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.command = .evaluate(v)
+        }
+      }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    switch self.command {
+    case .fit?: try {
+      guard case .fit(let v)? = self.command else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    }()
+    case .fetch?: try {
+      guard case .fetch(let v)? = self.command else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    }()
+    case .delete?: try {
+      guard case .delete(let v)? = self.command else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+    }()
+    case .write?: try {
+      guard case .write(let v)? = self.command else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 4)
+    }()
+    case .read?: try {
+      guard case .read(let v)? = self.command else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 5)
+    }()
+    case .evaluate?: try {
+      guard case .evaluate(let v)? = self.command else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 6)
+    }()
+    case nil: break
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlCommand, rhs: Spark_Connect_MlCommand) -> Bool {
+    if lhs.command != rhs.command {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlCommand.Fit: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_MlCommand.protoMessageName + ".Fit"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "estimator"),
+    2: .same(proto: "params"),
+    3: .same(proto: "dataset"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._estimator) }()
+      case 2: try { try decoder.decodeSingularMessageField(value: &self._params) }()
+      case 3: try { try decoder.decodeSingularMessageField(value: &self._dataset) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._estimator {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    try { if let v = self._params {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    } }()
+    try { if let v = self._dataset {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlCommand.Fit, rhs: Spark_Connect_MlCommand.Fit) -> Bool {
+    if lhs._estimator != rhs._estimator {return false}
+    if lhs._params != rhs._params {return false}
+    if lhs._dataset != rhs._dataset {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlCommand.Delete: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_MlCommand.protoMessageName + ".Delete"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "obj_ref"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._objRef) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._objRef {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlCommand.Delete, rhs: Spark_Connect_MlCommand.Delete) -> Bool {
+    if lhs._objRef != rhs._objRef {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
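
The `Write` extension below is the one spot in this file that combines an explicit-presence scalar (`should_overwrite`, backed by `_shouldOverwrite: Bool?`) with a map field (`options`). A short sketch of the presence semantics those generated accessors encode (the option key and value are illustrative):

var write = Spark_Connect_MlCommand.Write()
assert(!write.hasShouldOverwrite)          // unset: reads fall back to false
write.shouldOverwrite = false              // setting any value records presence,
assert(write.hasShouldOverwrite)           // so an explicit false still goes on the wire
write.clearShouldOverwrite()               // unset again: field 5 is skipped by traverse()
write.options["compression"] = "snappy"    // illustrative option; maps are plain Swift dictionaries
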
+
+extension Spark_Connect_MlCommand.Write: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_MlCommand.protoMessageName + ".Write"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "operator"),
+    2: .standard(proto: "obj_ref"),
+    3: .same(proto: "params"),
+    4: .same(proto: "path"),
+    5: .standard(proto: "should_overwrite"),
+    6: .same(proto: "options"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try {
+        var v: Spark_Connect_MlOperator?
+        var hadOneofValue = false
+        if let current = self.type {
+          hadOneofValue = true
+          if case .operator(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.type = .operator(v)
+        }
+      }()
+      case 2: try {
+        var v: Spark_Connect_ObjectRef?
+        var hadOneofValue = false
+        if let current = self.type {
+          hadOneofValue = true
+          if case .objRef(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.type = .objRef(v)
+        }
+      }()
+      case 3: try { try decoder.decodeSingularMessageField(value: &self._params) }()
+      case 4: try { try decoder.decodeSingularStringField(value: &self.path) }()
+      case 5: try { try decoder.decodeSingularBoolField(value: &self._shouldOverwrite) }()
+      case 6: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: &self.options) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    switch self.type {
+    case .operator?: try {
+      guard case .operator(let v)? = self.type else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    }()
+    case .objRef?: try {
+      guard case .objRef(let v)? = self.type else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    }()
+    case nil: break
+    }
+    try { if let v = self._params {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+    } }()
+    if !self.path.isEmpty {
+      try visitor.visitSingularStringField(value: self.path, fieldNumber: 4)
+    }
+    try { if let v = self._shouldOverwrite {
+      try visitor.visitSingularBoolField(value: v, fieldNumber: 5)
+    } }()
+    if !self.options.isEmpty {
+      try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: self.options, fieldNumber: 6)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlCommand.Write, rhs: Spark_Connect_MlCommand.Write) -> Bool {
+    if lhs.type != rhs.type {return false}
+    if lhs._params != rhs._params {return false}
+    if lhs.path != rhs.path {return false}
+    if lhs._shouldOverwrite != rhs._shouldOverwrite {return false}
+    if lhs.options != rhs.options {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlCommand.Read: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_MlCommand.protoMessageName + ".Read"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "operator"),
+    2: .same(proto: "path"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._operator) }()
+      case 2: try { try decoder.decodeSingularStringField(value: &self.path) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._operator {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    if !self.path.isEmpty {
+      try visitor.visitSingularStringField(value: self.path, fieldNumber: 2)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlCommand.Read, rhs: Spark_Connect_MlCommand.Read) -> Bool {
+    if lhs._operator != rhs._operator {return false}
+    if lhs.path != rhs.path {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlCommand.Evaluate: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_MlCommand.protoMessageName + ".Evaluate"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "evaluator"),
+    2: .same(proto: "params"),
+    3: .same(proto: "dataset"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._evaluator) }()
+      case 2: try { try decoder.decodeSingularMessageField(value: &self._params) }()
+      case 3: try { try decoder.decodeSingularMessageField(value: &self._dataset) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._evaluator {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    try { if let v = self._params {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    } }()
+    try { if let v = self._dataset {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlCommand.Evaluate, rhs: Spark_Connect_MlCommand.Evaluate) -> Bool {
+    if lhs._evaluator != rhs._evaluator {return false}
+    if lhs._params != rhs._params {return false}
+    if lhs._dataset != rhs._dataset {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlCommandResult: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".MlCommandResult"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "param"),
+    2: .same(proto: "summary"),
+    3: .standard(proto: "operator_info"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try {
+        var v: Spark_Connect_Expression.Literal?
+        var hadOneofValue = false
+        if let current = self.resultType {
+          hadOneofValue = true
+          if case .param(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.resultType = .param(v)
+        }
+      }()
+      case 2: try {
+        var v: String?
+        try decoder.decodeSingularStringField(value: &v)
+        if let v = v {
+          if self.resultType != nil {try decoder.handleConflictingOneOf()}
+          self.resultType = .summary(v)
+        }
+      }()
+      case 3: try {
+        var v: Spark_Connect_MlCommandResult.MlOperatorInfo?
+        var hadOneofValue = false
+        if let current = self.resultType {
+          hadOneofValue = true
+          if case .operatorInfo(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.resultType = .operatorInfo(v)
+        }
+      }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    switch self.resultType {
+    case .param?: try {
+      guard case .param(let v)? = self.resultType else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    }()
+    case .summary?: try {
+      guard case .summary(let v)? = self.resultType else { preconditionFailure() }
+      try visitor.visitSingularStringField(value: v, fieldNumber: 2)
+    }()
+    case .operatorInfo?: try {
+      guard case .operatorInfo(let v)? = self.resultType else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+    }()
+    case nil: break
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlCommandResult, rhs: Spark_Connect_MlCommandResult) -> Bool {
+    if lhs.resultType != rhs.resultType {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlCommandResult.MlOperatorInfo: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_MlCommandResult.protoMessageName + ".MlOperatorInfo"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "obj_ref"),
+    2: .same(proto: "name"),
+    3: .same(proto: "uid"),
+    4: .same(proto: "params"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try {
+        var v: Spark_Connect_ObjectRef?
+        var hadOneofValue = false
+        if let current = self.type {
+          hadOneofValue = true
+          if case .objRef(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.type = .objRef(v)
+        }
+      }()
+      case 2: try {
+        var v: String?
+        try decoder.decodeSingularStringField(value: &v)
+        if let v = v {
+          if self.type != nil {try decoder.handleConflictingOneOf()}
+          self.type = .name(v)
+        }
+      }()
+      case 3: try { try decoder.decodeSingularStringField(value: &self._uid) }()
+      case 4: try { try decoder.decodeSingularMessageField(value: &self._params) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    switch self.type {
+    case .objRef?: try {
+      guard case .objRef(let v)? = self.type else { preconditionFailure() }
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    }()
+    case .name?: try {
+      guard case .name(let v)? 
= self.type else { preconditionFailure() } + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + }() + case nil: break + } + try { if let v = self._uid { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + try { if let v = self._params { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_MlCommandResult.MlOperatorInfo, rhs: Spark_Connect_MlCommandResult.MlOperatorInfo) -> Bool { + if lhs.type != rhs.type {return false} + if lhs._uid != rhs._uid {return false} + if lhs._params != rhs._params {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Sources/SparkConnect/ml_common.grpc.swift b/Sources/SparkConnect/ml_common.grpc.swift new file mode 100644 index 0000000..9b19676 --- /dev/null +++ b/Sources/SparkConnect/ml_common.grpc.swift @@ -0,0 +1,26 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/ml_common.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/ml_common.pb.swift b/Sources/SparkConnect/ml_common.pb.swift new file mode 100644 index 0000000..a3ef880 --- /dev/null +++ b/Sources/SparkConnect/ml_common.pb.swift @@ -0,0 +1,263 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/ml_common.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+
+import SwiftProtobuf
+
+// If the compiler emits an error on this type, it is because this file
+// was generated by a version of the `protoc` Swift plug-in that is
+// incompatible with the version of SwiftProtobuf to which you are linking.
+// Please ensure that you are building against the same version of the API
+// that was used to generate this file.
+fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck {
+  struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {}
+  typealias Version = _2
+}
+
+/// MlParams stores param settings for ML Estimator / Transformer / Evaluator
+struct Spark_Connect_MlParams: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// User-supplied params
+  var params: Dictionary<String,Spark_Connect_Expression.Literal> = [:]
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+/// MlOperator represents the ML operators like (Estimator, Transformer or Evaluator)
+struct Spark_Connect_MlOperator: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The qualified name of the ML operator.
+  var name: String = String()
+
+  /// (Required) Unique id of the ML operator
+  var uid: String = String()
+
+  /// (Required) Represents what the ML operator is
+  var type: Spark_Connect_MlOperator.OperatorType = .unspecified
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OperatorType: SwiftProtobuf.Enum, Swift.CaseIterable {
+    typealias RawValue = Int
+    case unspecified // = 0
+
+    /// ML estimator
+    case estimator // = 1
+
+    /// ML transformer (non-model)
+    case transformer // = 2
+
+    /// ML evaluator
+    case evaluator // = 3
+
+    /// ML model
+    case model // = 4
+    case UNRECOGNIZED(Int)
+
+    init() {
+      self = .unspecified
+    }
+
+    init?(rawValue: Int) {
+      switch rawValue {
+      case 0: self = .unspecified
+      case 1: self = .estimator
+      case 2: self = .transformer
+      case 3: self = .evaluator
+      case 4: self = .model
+      default: self = .UNRECOGNIZED(rawValue)
+      }
+    }
+
+    var rawValue: Int {
+      switch self {
+      case .unspecified: return 0
+      case .estimator: return 1
+      case .transformer: return 2
+      case .evaluator: return 3
+      case .model: return 4
+      case .UNRECOGNIZED(let i): return i
+      }
+    }
+
+    // The compiler won't synthesize support with the UNRECOGNIZED case.
+    static let allCases: [Spark_Connect_MlOperator.OperatorType] = [
+      .unspecified,
+      .estimator,
+      .transformer,
+      .evaluator,
+      .model,
+    ]
+
+  }
+
+  init() {}
+}
+
+/// Represents a reference to the cached object which could be a model
+/// or summary evaluated by a model
+struct Spark_Connect_ObjectRef: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The ID is used to lookup the object on the server side.
+  /// Note it is different from the 'uid' of a ML object.
+  var id: String = String()
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+// MARK: - Code below here is support for the SwiftProtobuf runtime.
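
A minimal usage sketch, not part of the generated file or this patch: the structs above are plain Swift values, so they can be built and round-tripped through the protobuf binary wire format with nothing but SwiftProtobuf's standard `Message` API (`serializedData()` / `init(serializedData:)`, per SwiftProtobuf 1.x spelling). The operator name, uid, and object id strings below are invented for illustration only.

import Foundation
import SwiftProtobuf

// Sketch: describe an ML estimator, serialize it, decode it back, and
// reference a server-side cached object. All string values are hypothetical.
func mlCommonSketch() throws {
  var op = Spark_Connect_MlOperator()
  op.name = "org.apache.spark.ml.classification.LogisticRegression"  // hypothetical qualified name
  op.uid = "logreg_a1b2"                                             // hypothetical uid
  op.type = .estimator

  // Round-trip through the protobuf binary format.
  let wire: Data = try op.serializedData()
  let decoded = try Spark_Connect_MlOperator(serializedData: wire)
  assert(decoded == op)  // Equatable comes from the generated conformances in this file

  // An ObjectRef carries only the server-side lookup id (distinct from 'uid').
  var ref = Spark_Connect_ObjectRef()
  ref.id = "model-42"  // hypothetical cache id
  _ = ref
}
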
+
+fileprivate let _protobuf_package = "spark.connect"
+
+extension Spark_Connect_MlParams: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".MlParams"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "params"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap<SwiftProtobuf.ProtobufString,Spark_Connect_Expression.Literal>.self, value: &self.params) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.params.isEmpty {
+      try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap<SwiftProtobuf.ProtobufString,Spark_Connect_Expression.Literal>.self, value: self.params, fieldNumber: 1)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlParams, rhs: Spark_Connect_MlParams) -> Bool {
+    if lhs.params != rhs.params {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlOperator: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".MlOperator"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "name"),
+    2: .same(proto: "uid"),
+    3: .same(proto: "type"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.name) }()
+      case 2: try { try decoder.decodeSingularStringField(value: &self.uid) }()
+      case 3: try { try decoder.decodeSingularEnumField(value: &self.type) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.name.isEmpty {
+      try visitor.visitSingularStringField(value: self.name, fieldNumber: 1)
+    }
+    if !self.uid.isEmpty {
+      try visitor.visitSingularStringField(value: self.uid, fieldNumber: 2)
+    }
+    if self.type != .unspecified {
+      try visitor.visitSingularEnumField(value: self.type, fieldNumber: 3)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MlOperator, rhs: Spark_Connect_MlOperator) -> Bool {
+    if lhs.name != rhs.name {return false}
+    if lhs.uid != rhs.uid {return false}
+    if lhs.type != rhs.type {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MlOperator.OperatorType: SwiftProtobuf._ProtoNameProviding {
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    0: .same(proto: "OPERATOR_TYPE_UNSPECIFIED"),
+    1: .same(proto: "OPERATOR_TYPE_ESTIMATOR"),
+    2: .same(proto: "OPERATOR_TYPE_TRANSFORMER"),
+    3: .same(proto: "OPERATOR_TYPE_EVALUATOR"),
+    4: .same(proto: "OPERATOR_TYPE_MODEL"),
+  ]
+}
+
+extension Spark_Connect_ObjectRef: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".ObjectRef"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "id"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.id) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.id.isEmpty {
+      try visitor.visitSingularStringField(value: self.id, fieldNumber: 1)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_ObjectRef, rhs: Spark_Connect_ObjectRef) -> Bool {
+    if lhs.id != rhs.id {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
diff --git a/Sources/SparkConnect/relations.grpc.swift b/Sources/SparkConnect/relations.grpc.swift
new file mode 100644
index 0000000..57a8084
--- /dev/null
+++ b/Sources/SparkConnect/relations.grpc.swift
@@ -0,0 +1,26 @@
+//
+// Licensed to the Apache Software Foundation (ASF) under one or more
+// contributor license agreements. See the NOTICE file distributed with
+// this work for additional information regarding copyright ownership.
+// The ASF licenses this file to You under the Apache License, Version 2.0
+// (the "License"); you may not use this file except in compliance with
+// the License. You may obtain a copy of the License at
+//
+// http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/relations.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/relations.pb.swift b/Sources/SparkConnect/relations.pb.swift new file mode 100644 index 0000000..a438ed2 --- /dev/null +++ b/Sources/SparkConnect/relations.pb.swift @@ -0,0 +1,10559 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/relations.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import Foundation +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +/// The main [[Relation]] type. Fundamentally, a relation is a typed container +/// that has exactly one explicit relation type set. +/// +/// When adding new relation types, they have to be registered here. +struct Spark_Connect_Relation: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var common: Spark_Connect_RelationCommon { + get {return _storage._common ?? Spark_Connect_RelationCommon()} + set {_uniqueStorage()._common = newValue} + } + /// Returns true if `common` has been explicitly set. + var hasCommon: Bool {return _storage._common != nil} + /// Clears the value of `common`. Subsequent reads from it will return its default value. + mutating func clearCommon() {_uniqueStorage()._common = nil} + + var relType: OneOf_RelType? { + get {return _storage._relType} + set {_uniqueStorage()._relType = newValue} + } + + var read: Spark_Connect_Read { + get { + if case .read(let v)? 
= _storage._relType {return v} + return Spark_Connect_Read() + } + set {_uniqueStorage()._relType = .read(newValue)} + } + + var project: Spark_Connect_Project { + get { + if case .project(let v)? = _storage._relType {return v} + return Spark_Connect_Project() + } + set {_uniqueStorage()._relType = .project(newValue)} + } + + var filter: Spark_Connect_Filter { + get { + if case .filter(let v)? = _storage._relType {return v} + return Spark_Connect_Filter() + } + set {_uniqueStorage()._relType = .filter(newValue)} + } + + var join: Spark_Connect_Join { + get { + if case .join(let v)? = _storage._relType {return v} + return Spark_Connect_Join() + } + set {_uniqueStorage()._relType = .join(newValue)} + } + + var setOp: Spark_Connect_SetOperation { + get { + if case .setOp(let v)? = _storage._relType {return v} + return Spark_Connect_SetOperation() + } + set {_uniqueStorage()._relType = .setOp(newValue)} + } + + var sort: Spark_Connect_Sort { + get { + if case .sort(let v)? = _storage._relType {return v} + return Spark_Connect_Sort() + } + set {_uniqueStorage()._relType = .sort(newValue)} + } + + var limit: Spark_Connect_Limit { + get { + if case .limit(let v)? = _storage._relType {return v} + return Spark_Connect_Limit() + } + set {_uniqueStorage()._relType = .limit(newValue)} + } + + var aggregate: Spark_Connect_Aggregate { + get { + if case .aggregate(let v)? = _storage._relType {return v} + return Spark_Connect_Aggregate() + } + set {_uniqueStorage()._relType = .aggregate(newValue)} + } + + var sql: Spark_Connect_SQL { + get { + if case .sql(let v)? = _storage._relType {return v} + return Spark_Connect_SQL() + } + set {_uniqueStorage()._relType = .sql(newValue)} + } + + var localRelation: Spark_Connect_LocalRelation { + get { + if case .localRelation(let v)? = _storage._relType {return v} + return Spark_Connect_LocalRelation() + } + set {_uniqueStorage()._relType = .localRelation(newValue)} + } + + var sample: Spark_Connect_Sample { + get { + if case .sample(let v)? = _storage._relType {return v} + return Spark_Connect_Sample() + } + set {_uniqueStorage()._relType = .sample(newValue)} + } + + var offset: Spark_Connect_Offset { + get { + if case .offset(let v)? = _storage._relType {return v} + return Spark_Connect_Offset() + } + set {_uniqueStorage()._relType = .offset(newValue)} + } + + var deduplicate: Spark_Connect_Deduplicate { + get { + if case .deduplicate(let v)? = _storage._relType {return v} + return Spark_Connect_Deduplicate() + } + set {_uniqueStorage()._relType = .deduplicate(newValue)} + } + + var range: Spark_Connect_Range { + get { + if case .range(let v)? = _storage._relType {return v} + return Spark_Connect_Range() + } + set {_uniqueStorage()._relType = .range(newValue)} + } + + var subqueryAlias: Spark_Connect_SubqueryAlias { + get { + if case .subqueryAlias(let v)? = _storage._relType {return v} + return Spark_Connect_SubqueryAlias() + } + set {_uniqueStorage()._relType = .subqueryAlias(newValue)} + } + + var repartition: Spark_Connect_Repartition { + get { + if case .repartition(let v)? = _storage._relType {return v} + return Spark_Connect_Repartition() + } + set {_uniqueStorage()._relType = .repartition(newValue)} + } + + var toDf: Spark_Connect_ToDF { + get { + if case .toDf(let v)? = _storage._relType {return v} + return Spark_Connect_ToDF() + } + set {_uniqueStorage()._relType = .toDf(newValue)} + } + + var withColumnsRenamed: Spark_Connect_WithColumnsRenamed { + get { + if case .withColumnsRenamed(let v)? 
= _storage._relType {return v} + return Spark_Connect_WithColumnsRenamed() + } + set {_uniqueStorage()._relType = .withColumnsRenamed(newValue)} + } + + var showString: Spark_Connect_ShowString { + get { + if case .showString(let v)? = _storage._relType {return v} + return Spark_Connect_ShowString() + } + set {_uniqueStorage()._relType = .showString(newValue)} + } + + var drop: Spark_Connect_Drop { + get { + if case .drop(let v)? = _storage._relType {return v} + return Spark_Connect_Drop() + } + set {_uniqueStorage()._relType = .drop(newValue)} + } + + var tail: Spark_Connect_Tail { + get { + if case .tail(let v)? = _storage._relType {return v} + return Spark_Connect_Tail() + } + set {_uniqueStorage()._relType = .tail(newValue)} + } + + var withColumns: Spark_Connect_WithColumns { + get { + if case .withColumns(let v)? = _storage._relType {return v} + return Spark_Connect_WithColumns() + } + set {_uniqueStorage()._relType = .withColumns(newValue)} + } + + var hint: Spark_Connect_Hint { + get { + if case .hint(let v)? = _storage._relType {return v} + return Spark_Connect_Hint() + } + set {_uniqueStorage()._relType = .hint(newValue)} + } + + var unpivot: Spark_Connect_Unpivot { + get { + if case .unpivot(let v)? = _storage._relType {return v} + return Spark_Connect_Unpivot() + } + set {_uniqueStorage()._relType = .unpivot(newValue)} + } + + var toSchema: Spark_Connect_ToSchema { + get { + if case .toSchema(let v)? = _storage._relType {return v} + return Spark_Connect_ToSchema() + } + set {_uniqueStorage()._relType = .toSchema(newValue)} + } + + var repartitionByExpression: Spark_Connect_RepartitionByExpression { + get { + if case .repartitionByExpression(let v)? = _storage._relType {return v} + return Spark_Connect_RepartitionByExpression() + } + set {_uniqueStorage()._relType = .repartitionByExpression(newValue)} + } + + var mapPartitions: Spark_Connect_MapPartitions { + get { + if case .mapPartitions(let v)? = _storage._relType {return v} + return Spark_Connect_MapPartitions() + } + set {_uniqueStorage()._relType = .mapPartitions(newValue)} + } + + var collectMetrics: Spark_Connect_CollectMetrics { + get { + if case .collectMetrics(let v)? = _storage._relType {return v} + return Spark_Connect_CollectMetrics() + } + set {_uniqueStorage()._relType = .collectMetrics(newValue)} + } + + var parse: Spark_Connect_Parse { + get { + if case .parse(let v)? = _storage._relType {return v} + return Spark_Connect_Parse() + } + set {_uniqueStorage()._relType = .parse(newValue)} + } + + var groupMap: Spark_Connect_GroupMap { + get { + if case .groupMap(let v)? = _storage._relType {return v} + return Spark_Connect_GroupMap() + } + set {_uniqueStorage()._relType = .groupMap(newValue)} + } + + var coGroupMap: Spark_Connect_CoGroupMap { + get { + if case .coGroupMap(let v)? = _storage._relType {return v} + return Spark_Connect_CoGroupMap() + } + set {_uniqueStorage()._relType = .coGroupMap(newValue)} + } + + var withWatermark: Spark_Connect_WithWatermark { + get { + if case .withWatermark(let v)? = _storage._relType {return v} + return Spark_Connect_WithWatermark() + } + set {_uniqueStorage()._relType = .withWatermark(newValue)} + } + + var applyInPandasWithState: Spark_Connect_ApplyInPandasWithState { + get { + if case .applyInPandasWithState(let v)? = _storage._relType {return v} + return Spark_Connect_ApplyInPandasWithState() + } + set {_uniqueStorage()._relType = .applyInPandasWithState(newValue)} + } + + var htmlString: Spark_Connect_HtmlString { + get { + if case .htmlString(let v)? 
= _storage._relType {return v} + return Spark_Connect_HtmlString() + } + set {_uniqueStorage()._relType = .htmlString(newValue)} + } + + var cachedLocalRelation: Spark_Connect_CachedLocalRelation { + get { + if case .cachedLocalRelation(let v)? = _storage._relType {return v} + return Spark_Connect_CachedLocalRelation() + } + set {_uniqueStorage()._relType = .cachedLocalRelation(newValue)} + } + + var cachedRemoteRelation: Spark_Connect_CachedRemoteRelation { + get { + if case .cachedRemoteRelation(let v)? = _storage._relType {return v} + return Spark_Connect_CachedRemoteRelation() + } + set {_uniqueStorage()._relType = .cachedRemoteRelation(newValue)} + } + + var commonInlineUserDefinedTableFunction: Spark_Connect_CommonInlineUserDefinedTableFunction { + get { + if case .commonInlineUserDefinedTableFunction(let v)? = _storage._relType {return v} + return Spark_Connect_CommonInlineUserDefinedTableFunction() + } + set {_uniqueStorage()._relType = .commonInlineUserDefinedTableFunction(newValue)} + } + + var asOfJoin: Spark_Connect_AsOfJoin { + get { + if case .asOfJoin(let v)? = _storage._relType {return v} + return Spark_Connect_AsOfJoin() + } + set {_uniqueStorage()._relType = .asOfJoin(newValue)} + } + + var commonInlineUserDefinedDataSource: Spark_Connect_CommonInlineUserDefinedDataSource { + get { + if case .commonInlineUserDefinedDataSource(let v)? = _storage._relType {return v} + return Spark_Connect_CommonInlineUserDefinedDataSource() + } + set {_uniqueStorage()._relType = .commonInlineUserDefinedDataSource(newValue)} + } + + var withRelations: Spark_Connect_WithRelations { + get { + if case .withRelations(let v)? = _storage._relType {return v} + return Spark_Connect_WithRelations() + } + set {_uniqueStorage()._relType = .withRelations(newValue)} + } + + var transpose: Spark_Connect_Transpose { + get { + if case .transpose(let v)? = _storage._relType {return v} + return Spark_Connect_Transpose() + } + set {_uniqueStorage()._relType = .transpose(newValue)} + } + + var unresolvedTableValuedFunction: Spark_Connect_UnresolvedTableValuedFunction { + get { + if case .unresolvedTableValuedFunction(let v)? = _storage._relType {return v} + return Spark_Connect_UnresolvedTableValuedFunction() + } + set {_uniqueStorage()._relType = .unresolvedTableValuedFunction(newValue)} + } + + var lateralJoin: Spark_Connect_LateralJoin { + get { + if case .lateralJoin(let v)? = _storage._relType {return v} + return Spark_Connect_LateralJoin() + } + set {_uniqueStorage()._relType = .lateralJoin(newValue)} + } + + /// NA functions + var fillNa: Spark_Connect_NAFill { + get { + if case .fillNa(let v)? = _storage._relType {return v} + return Spark_Connect_NAFill() + } + set {_uniqueStorage()._relType = .fillNa(newValue)} + } + + var dropNa: Spark_Connect_NADrop { + get { + if case .dropNa(let v)? = _storage._relType {return v} + return Spark_Connect_NADrop() + } + set {_uniqueStorage()._relType = .dropNa(newValue)} + } + + var replace: Spark_Connect_NAReplace { + get { + if case .replace(let v)? = _storage._relType {return v} + return Spark_Connect_NAReplace() + } + set {_uniqueStorage()._relType = .replace(newValue)} + } + + /// stat functions + var summary: Spark_Connect_StatSummary { + get { + if case .summary(let v)? = _storage._relType {return v} + return Spark_Connect_StatSummary() + } + set {_uniqueStorage()._relType = .summary(newValue)} + } + + var crosstab: Spark_Connect_StatCrosstab { + get { + if case .crosstab(let v)? 
= _storage._relType {return v} + return Spark_Connect_StatCrosstab() + } + set {_uniqueStorage()._relType = .crosstab(newValue)} + } + + var describe: Spark_Connect_StatDescribe { + get { + if case .describe(let v)? = _storage._relType {return v} + return Spark_Connect_StatDescribe() + } + set {_uniqueStorage()._relType = .describe(newValue)} + } + + var cov: Spark_Connect_StatCov { + get { + if case .cov(let v)? = _storage._relType {return v} + return Spark_Connect_StatCov() + } + set {_uniqueStorage()._relType = .cov(newValue)} + } + + var corr: Spark_Connect_StatCorr { + get { + if case .corr(let v)? = _storage._relType {return v} + return Spark_Connect_StatCorr() + } + set {_uniqueStorage()._relType = .corr(newValue)} + } + + var approxQuantile: Spark_Connect_StatApproxQuantile { + get { + if case .approxQuantile(let v)? = _storage._relType {return v} + return Spark_Connect_StatApproxQuantile() + } + set {_uniqueStorage()._relType = .approxQuantile(newValue)} + } + + var freqItems: Spark_Connect_StatFreqItems { + get { + if case .freqItems(let v)? = _storage._relType {return v} + return Spark_Connect_StatFreqItems() + } + set {_uniqueStorage()._relType = .freqItems(newValue)} + } + + var sampleBy: Spark_Connect_StatSampleBy { + get { + if case .sampleBy(let v)? = _storage._relType {return v} + return Spark_Connect_StatSampleBy() + } + set {_uniqueStorage()._relType = .sampleBy(newValue)} + } + + /// Catalog API (experimental / unstable) + var catalog: Spark_Connect_Catalog { + get { + if case .catalog(let v)? = _storage._relType {return v} + return Spark_Connect_Catalog() + } + set {_uniqueStorage()._relType = .catalog(newValue)} + } + + /// ML relation + var mlRelation: Spark_Connect_MlRelation { + get { + if case .mlRelation(let v)? = _storage._relType {return v} + return Spark_Connect_MlRelation() + } + set {_uniqueStorage()._relType = .mlRelation(newValue)} + } + + /// This field is used to mark extensions to the protocol. When plugins generate arbitrary + /// relations they can add them here. During the planning the correct resolution is done. + var `extension`: SwiftProtobuf.Google_Protobuf_Any { + get { + if case .extension(let v)? = _storage._relType {return v} + return SwiftProtobuf.Google_Protobuf_Any() + } + set {_uniqueStorage()._relType = .extension(newValue)} + } + + var unknown: Spark_Connect_Unknown { + get { + if case .unknown(let v)? 
= _storage._relType {return v} + return Spark_Connect_Unknown() + } + set {_uniqueStorage()._relType = .unknown(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_RelType: Equatable, Sendable { + case read(Spark_Connect_Read) + case project(Spark_Connect_Project) + case filter(Spark_Connect_Filter) + case join(Spark_Connect_Join) + case setOp(Spark_Connect_SetOperation) + case sort(Spark_Connect_Sort) + case limit(Spark_Connect_Limit) + case aggregate(Spark_Connect_Aggregate) + case sql(Spark_Connect_SQL) + case localRelation(Spark_Connect_LocalRelation) + case sample(Spark_Connect_Sample) + case offset(Spark_Connect_Offset) + case deduplicate(Spark_Connect_Deduplicate) + case range(Spark_Connect_Range) + case subqueryAlias(Spark_Connect_SubqueryAlias) + case repartition(Spark_Connect_Repartition) + case toDf(Spark_Connect_ToDF) + case withColumnsRenamed(Spark_Connect_WithColumnsRenamed) + case showString(Spark_Connect_ShowString) + case drop(Spark_Connect_Drop) + case tail(Spark_Connect_Tail) + case withColumns(Spark_Connect_WithColumns) + case hint(Spark_Connect_Hint) + case unpivot(Spark_Connect_Unpivot) + case toSchema(Spark_Connect_ToSchema) + case repartitionByExpression(Spark_Connect_RepartitionByExpression) + case mapPartitions(Spark_Connect_MapPartitions) + case collectMetrics(Spark_Connect_CollectMetrics) + case parse(Spark_Connect_Parse) + case groupMap(Spark_Connect_GroupMap) + case coGroupMap(Spark_Connect_CoGroupMap) + case withWatermark(Spark_Connect_WithWatermark) + case applyInPandasWithState(Spark_Connect_ApplyInPandasWithState) + case htmlString(Spark_Connect_HtmlString) + case cachedLocalRelation(Spark_Connect_CachedLocalRelation) + case cachedRemoteRelation(Spark_Connect_CachedRemoteRelation) + case commonInlineUserDefinedTableFunction(Spark_Connect_CommonInlineUserDefinedTableFunction) + case asOfJoin(Spark_Connect_AsOfJoin) + case commonInlineUserDefinedDataSource(Spark_Connect_CommonInlineUserDefinedDataSource) + case withRelations(Spark_Connect_WithRelations) + case transpose(Spark_Connect_Transpose) + case unresolvedTableValuedFunction(Spark_Connect_UnresolvedTableValuedFunction) + case lateralJoin(Spark_Connect_LateralJoin) + /// NA functions + case fillNa(Spark_Connect_NAFill) + case dropNa(Spark_Connect_NADrop) + case replace(Spark_Connect_NAReplace) + /// stat functions + case summary(Spark_Connect_StatSummary) + case crosstab(Spark_Connect_StatCrosstab) + case describe(Spark_Connect_StatDescribe) + case cov(Spark_Connect_StatCov) + case corr(Spark_Connect_StatCorr) + case approxQuantile(Spark_Connect_StatApproxQuantile) + case freqItems(Spark_Connect_StatFreqItems) + case sampleBy(Spark_Connect_StatSampleBy) + /// Catalog API (experimental / unstable) + case catalog(Spark_Connect_Catalog) + /// ML relation + case mlRelation(Spark_Connect_MlRelation) + /// This field is used to mark extensions to the protocol. When plugins generate arbitrary + /// relations they can add them here. During the planning the correct resolution is done. + case `extension`(SwiftProtobuf.Google_Protobuf_Any) + case unknown(Spark_Connect_Unknown) + + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Relation to represent ML world +struct Spark_Connect_MlRelation: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var mlType: OneOf_MlType? 
{ + get {return _storage._mlType} + set {_uniqueStorage()._mlType = newValue} + } + + var transform: Spark_Connect_MlRelation.Transform { + get { + if case .transform(let v)? = _storage._mlType {return v} + return Spark_Connect_MlRelation.Transform() + } + set {_uniqueStorage()._mlType = .transform(newValue)} + } + + var fetch: Spark_Connect_Fetch { + get { + if case .fetch(let v)? = _storage._mlType {return v} + return Spark_Connect_Fetch() + } + set {_uniqueStorage()._mlType = .fetch(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_MlType: Equatable, Sendable { + case transform(Spark_Connect_MlRelation.Transform) + case fetch(Spark_Connect_Fetch) + + } + + /// Relation to represent transform(input) of the operator + /// which could be a cached model or a new transformer + struct Transform: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var `operator`: OneOf_Operator? { + get {return _storage._operator} + set {_uniqueStorage()._operator = newValue} + } + + /// Object reference + var objRef: Spark_Connect_ObjectRef { + get { + if case .objRef(let v)? = _storage._operator {return v} + return Spark_Connect_ObjectRef() + } + set {_uniqueStorage()._operator = .objRef(newValue)} + } + + /// Could be an ML transformer like VectorAssembler + var transformer: Spark_Connect_MlOperator { + get { + if case .transformer(let v)? = _storage._operator {return v} + return Spark_Connect_MlOperator() + } + set {_uniqueStorage()._operator = .transformer(newValue)} + } + + /// the input dataframe + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// the operator specific parameters + var params: Spark_Connect_MlParams { + get {return _storage._params ?? Spark_Connect_MlParams()} + set {_uniqueStorage()._params = newValue} + } + /// Returns true if `params` has been explicitly set. + var hasParams: Bool {return _storage._params != nil} + /// Clears the value of `params`. Subsequent reads from it will return its default value. + mutating func clearParams() {_uniqueStorage()._params = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_Operator: Equatable, Sendable { + /// Object reference + case objRef(Spark_Connect_ObjectRef) + /// Could be an ML transformer like VectorAssembler + case transformer(Spark_Connect_MlOperator) + + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Message for fetching attribute from object on the server side. +/// Fetch can be represented as a Relation or a ML command +/// Command: model.coefficients, model.summary.weightedPrecision which +/// returns the final literal result +/// Relation: model.summary.roc which returns a DataFrame (Relation) +struct Spark_Connect_Fetch: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + /// (Required) reference to the object on the server side + var objRef: Spark_Connect_ObjectRef { + get {return _objRef ?? Spark_Connect_ObjectRef()} + set {_objRef = newValue} + } + /// Returns true if `objRef` has been explicitly set. + var hasObjRef: Bool {return self._objRef != nil} + /// Clears the value of `objRef`. Subsequent reads from it will return its default value. + mutating func clearObjRef() {self._objRef = nil} + + /// (Required) the calling method chains + var methods: [Spark_Connect_Fetch.Method] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// Represents a method with inclusion of method name and its arguments + struct Method: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) the method name + var method: String = String() + + /// (Optional) the arguments of the method + var args: [Spark_Connect_Fetch.Method.Args] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + struct Args: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var argsType: Spark_Connect_Fetch.Method.Args.OneOf_ArgsType? = nil + + var param: Spark_Connect_Expression.Literal { + get { + if case .param(let v)? = argsType {return v} + return Spark_Connect_Expression.Literal() + } + set {argsType = .param(newValue)} + } + + var input: Spark_Connect_Relation { + get { + if case .input(let v)? = argsType {return v} + return Spark_Connect_Relation() + } + set {argsType = .input(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_ArgsType: Equatable, Sendable { + case param(Spark_Connect_Expression.Literal) + case input(Spark_Connect_Relation) + + } + + init() {} + } + + init() {} + } + + init() {} + + fileprivate var _objRef: Spark_Connect_ObjectRef? = nil +} + +/// Used for testing purposes only. +struct Spark_Connect_Unknown: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// Common metadata of all relations. +struct Spark_Connect_RelationCommon: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Shared relation metadata. + /// + /// NOTE: This field was marked as deprecated in the .proto file. + var sourceInfo: String = String() + + /// (Optional) A per-client globally unique id for a given connect plan. + var planID: Int64 { + get {return _planID ?? 0} + set {_planID = newValue} + } + /// Returns true if `planID` has been explicitly set. + var hasPlanID: Bool {return self._planID != nil} + /// Clears the value of `planID`. Subsequent reads from it will return its default value. + mutating func clearPlanID() {self._planID = nil} + + /// (Optional) Keep the information of the origin for this expression such as stacktrace. + var origin: Spark_Connect_Origin { + get {return _origin ?? Spark_Connect_Origin()} + set {_origin = newValue} + } + /// Returns true if `origin` has been explicitly set. 
+ var hasOrigin: Bool {return self._origin != nil} + /// Clears the value of `origin`. Subsequent reads from it will return its default value. + mutating func clearOrigin() {self._origin = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _planID: Int64? = nil + fileprivate var _origin: Spark_Connect_Origin? = nil +} + +/// Relation that uses a SQL query to generate the output. +struct Spark_Connect_SQL: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The SQL query. + var query: String = String() + + /// (Optional) A map of parameter names to literal expressions. + /// + /// NOTE: This field was marked as deprecated in the .proto file. + var args: Dictionary = [:] + + /// (Optional) A sequence of literal expressions for positional parameters in the SQL query text. + /// + /// NOTE: This field was marked as deprecated in the .proto file. + var posArgs: [Spark_Connect_Expression.Literal] = [] + + /// (Optional) A map of parameter names to expressions. + /// It cannot coexist with `pos_arguments`. + var namedArguments: Dictionary = [:] + + /// (Optional) A sequence of expressions for positional parameters in the SQL query text. + /// It cannot coexist with `named_arguments`. + var posArguments: [Spark_Connect_Expression] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} +} + +/// Relation of type [[WithRelations]]. +/// +/// This relation contains a root plan, and one or more references that are used by the root plan. +/// There are two ways of referencing a relation, by name (through a subquery alias), or by plan_id +/// (using RelationCommon.plan_id). +/// +/// This relation can be used to implement CTEs, describe DAGs, or to reduce tree depth. +struct Spark_Connect_WithRelations: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Plan at the root of the query tree. This plan is expected to contain one or more + /// references. Those references get expanded later on by the engine. + var root: Spark_Connect_Relation { + get {return _storage._root ?? Spark_Connect_Relation()} + set {_uniqueStorage()._root = newValue} + } + /// Returns true if `root` has been explicitly set. + var hasRoot: Bool {return _storage._root != nil} + /// Clears the value of `root`. Subsequent reads from it will return its default value. + mutating func clearRoot() {_uniqueStorage()._root = nil} + + /// (Required) Plans referenced by the root plan. Relations in this list are also allowed to + /// contain references to other relations in this list, as long they do not form cycles. + var references: [Spark_Connect_Relation] { + get {return _storage._references} + set {_uniqueStorage()._references = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Relation that reads from a file / table or other data source. Does not have additional +/// inputs. +struct Spark_Connect_Read: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
+ + var readType: Spark_Connect_Read.OneOf_ReadType? = nil + + var namedTable: Spark_Connect_Read.NamedTable { + get { + if case .namedTable(let v)? = readType {return v} + return Spark_Connect_Read.NamedTable() + } + set {readType = .namedTable(newValue)} + } + + var dataSource: Spark_Connect_Read.DataSource { + get { + if case .dataSource(let v)? = readType {return v} + return Spark_Connect_Read.DataSource() + } + set {readType = .dataSource(newValue)} + } + + /// (Optional) Indicates if this is a streaming read. + var isStreaming: Bool = false + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum OneOf_ReadType: Equatable, Sendable { + case namedTable(Spark_Connect_Read.NamedTable) + case dataSource(Spark_Connect_Read.DataSource) + + } + + struct NamedTable: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Unparsed identifier for the table. + var unparsedIdentifier: String = String() + + /// Options for the named table. The map key is case insensitive. + var options: Dictionary = [:] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct DataSource: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) Supported formats include: parquet, orc, text, json, parquet, csv, avro. + /// + /// If not set, the value from SQL conf 'spark.sql.sources.default' will be used. + var format: String { + get {return _format ?? String()} + set {_format = newValue} + } + /// Returns true if `format` has been explicitly set. + var hasFormat: Bool {return self._format != nil} + /// Clears the value of `format`. Subsequent reads from it will return its default value. + mutating func clearFormat() {self._format = nil} + + /// (Optional) If not set, Spark will infer the schema. + /// + /// This schema string should be either DDL-formatted or JSON-formatted. + var schema: String { + get {return _schema ?? String()} + set {_schema = newValue} + } + /// Returns true if `schema` has been explicitly set. + var hasSchema: Bool {return self._schema != nil} + /// Clears the value of `schema`. Subsequent reads from it will return its default value. + mutating func clearSchema() {self._schema = nil} + + /// Options for the data source. The context of this map varies based on the + /// data source format. This options could be empty for valid data source format. + /// The map key is case insensitive. + var options: Dictionary = [:] + + /// (Optional) A list of path for file-system backed data sources. + var paths: [String] = [] + + /// (Optional) Condition in the where clause for each partition. + /// + /// This is only supported by the JDBC data source. + var predicates: [String] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _format: String? = nil + fileprivate var _schema: String? = nil + } + + init() {} +} + +/// Projection of a bag of expressions for a given input relation. +/// +/// The input relation must be specified. +/// The projected expression can be an arbitrary expression. +struct Spark_Connect_Project: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) Input relation is optional for Project. + /// + /// For example, `SELECT ABS(-1)` is valid plan without an input plan. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) A Project requires at least one expression. + var expressions: [Spark_Connect_Expression] { + get {return _storage._expressions} + set {_uniqueStorage()._expressions = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Relation that applies a boolean expression `condition` on each row of `input` to produce +/// the output result. +struct Spark_Connect_Filter: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Input relation for a Filter. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) A Filter must have a condition expression. + var condition: Spark_Connect_Expression { + get {return _storage._condition ?? Spark_Connect_Expression()} + set {_uniqueStorage()._condition = newValue} + } + /// Returns true if `condition` has been explicitly set. + var hasCondition: Bool {return _storage._condition != nil} + /// Clears the value of `condition`. Subsequent reads from it will return its default value. + mutating func clearCondition() {_uniqueStorage()._condition = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Relation of type [[Join]]. +/// +/// `left` and `right` must be present. +struct Spark_Connect_Join: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Left input relation for a Join. + var left: Spark_Connect_Relation { + get {return _storage._left ?? Spark_Connect_Relation()} + set {_uniqueStorage()._left = newValue} + } + /// Returns true if `left` has been explicitly set. + var hasLeft: Bool {return _storage._left != nil} + /// Clears the value of `left`. Subsequent reads from it will return its default value. + mutating func clearLeft() {_uniqueStorage()._left = nil} + + /// (Required) Right input relation for a Join. + var right: Spark_Connect_Relation { + get {return _storage._right ?? Spark_Connect_Relation()} + set {_uniqueStorage()._right = newValue} + } + /// Returns true if `right` has been explicitly set. 
+ var hasRight: Bool {return _storage._right != nil} + /// Clears the value of `right`. Subsequent reads from it will return its default value. + mutating func clearRight() {_uniqueStorage()._right = nil} + + /// (Optional) The join condition. Could be unset when `using_columns` is utilized. + /// + /// This field does not co-exist with using_columns. + var joinCondition: Spark_Connect_Expression { + get {return _storage._joinCondition ?? Spark_Connect_Expression()} + set {_uniqueStorage()._joinCondition = newValue} + } + /// Returns true if `joinCondition` has been explicitly set. + var hasJoinCondition: Bool {return _storage._joinCondition != nil} + /// Clears the value of `joinCondition`. Subsequent reads from it will return its default value. + mutating func clearJoinCondition() {_uniqueStorage()._joinCondition = nil} + + /// (Required) The join type. + var joinType: Spark_Connect_Join.JoinType { + get {return _storage._joinType} + set {_uniqueStorage()._joinType = newValue} + } + + /// Optional. using_columns provides a list of columns that should present on both sides of + /// the join inputs that this Join will join on. For example A JOIN B USING col_name is + /// equivalent to A JOIN B on A.col_name = B.col_name. + /// + /// This field does not co-exist with join_condition. + var usingColumns: [String] { + get {return _storage._usingColumns} + set {_uniqueStorage()._usingColumns = newValue} + } + + /// (Optional) Only used by joinWith. Set the left and right join data types. + var joinDataType: Spark_Connect_Join.JoinDataType { + get {return _storage._joinDataType ?? Spark_Connect_Join.JoinDataType()} + set {_uniqueStorage()._joinDataType = newValue} + } + /// Returns true if `joinDataType` has been explicitly set. + var hasJoinDataType: Bool {return _storage._joinDataType != nil} + /// Clears the value of `joinDataType`. Subsequent reads from it will return its default value. + mutating func clearJoinDataType() {_uniqueStorage()._joinDataType = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum JoinType: SwiftProtobuf.Enum, Swift.CaseIterable { + typealias RawValue = Int + case unspecified // = 0 + case inner // = 1 + case fullOuter // = 2 + case leftOuter // = 3 + case rightOuter // = 4 + case leftAnti // = 5 + case leftSemi // = 6 + case cross // = 7 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .inner + case 2: self = .fullOuter + case 3: self = .leftOuter + case 4: self = .rightOuter + case 5: self = .leftAnti + case 6: self = .leftSemi + case 7: self = .cross + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .inner: return 1 + case .fullOuter: return 2 + case .leftOuter: return 3 + case .rightOuter: return 4 + case .leftAnti: return 5 + case .leftSemi: return 6 + case .cross: return 7 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + static let allCases: [Spark_Connect_Join.JoinType] = [ + .unspecified, + .inner, + .fullOuter, + .leftOuter, + .rightOuter, + .leftAnti, + .leftSemi, + .cross, + ] + + } + + struct JoinDataType: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// If the left data type is a struct. 
+ struct JoinDataType: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// If the left data type is a struct.
+ var isLeftStruct: Bool = false
+
+ /// If the right data type is a struct.
+ var isRightStruct: Bool = false
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[SetOperation]]
+struct Spark_Connect_SetOperation: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) Left input relation for a Set operation.
+ var leftInput: Spark_Connect_Relation {
+ get {return _storage._leftInput ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._leftInput = newValue}
+ }
+ /// Returns true if `leftInput` has been explicitly set.
+ var hasLeftInput: Bool {return _storage._leftInput != nil}
+ /// Clears the value of `leftInput`. Subsequent reads from it will return its default value.
+ mutating func clearLeftInput() {_uniqueStorage()._leftInput = nil}
+
+ /// (Required) Right input relation for a Set operation.
+ var rightInput: Spark_Connect_Relation {
+ get {return _storage._rightInput ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._rightInput = newValue}
+ }
+ /// Returns true if `rightInput` has been explicitly set.
+ var hasRightInput: Bool {return _storage._rightInput != nil}
+ /// Clears the value of `rightInput`. Subsequent reads from it will return its default value.
+ mutating func clearRightInput() {_uniqueStorage()._rightInput = nil}
+
+ /// (Required) The Set operation type.
+ var setOpType: Spark_Connect_SetOperation.SetOpType {
+ get {return _storage._setOpType}
+ set {_uniqueStorage()._setOpType = newValue}
+ }
+
+ /// (Optional) Whether to remove duplicate rows.
+ ///
+ /// True to preserve all results.
+ /// False to remove duplicate rows.
+ var isAll: Bool {
+ get {return _storage._isAll ?? false}
+ set {_uniqueStorage()._isAll = newValue}
+ }
+ /// Returns true if `isAll` has been explicitly set.
+ var hasIsAll: Bool {return _storage._isAll != nil}
+ /// Clears the value of `isAll`. Subsequent reads from it will return its default value.
+ mutating func clearIsAll() {_uniqueStorage()._isAll = nil}
+
+ /// (Optional) Whether to perform the Set operation based on name resolution.
+ ///
+ /// Only UNION supports this option.
+ var byName: Bool {
+ get {return _storage._byName ?? false}
+ set {_uniqueStorage()._byName = newValue}
+ }
+ /// Returns true if `byName` has been explicitly set.
+ var hasByName: Bool {return _storage._byName != nil}
+ /// Clears the value of `byName`. Subsequent reads from it will return its default value.
+ mutating func clearByName() {_uniqueStorage()._byName = nil}
+
+ /// (Optional) Whether to perform the Set operation while allowing missing columns.
+ ///
+ /// Only UNION supports this option.
+ var allowMissingColumns: Bool {
+ get {return _storage._allowMissingColumns ?? false}
+ set {_uniqueStorage()._allowMissingColumns = newValue}
+ }
+ /// Returns true if `allowMissingColumns` has been explicitly set.
+ var hasAllowMissingColumns: Bool {return _storage._allowMissingColumns != nil}
+ /// Clears the value of `allowMissingColumns`. Subsequent reads from it will return its default value.
+ mutating func clearAllowMissingColumns() {_uniqueStorage()._allowMissingColumns = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
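+
+ // Illustrative sketch, not generator output: a `UNION ALL` of two inputs.
+ // `a` and `b` are hypothetical Spark_Connect_Relation values; `isAll = true`
+ // keeps duplicate rows, and `hasIsAll` records that the field was set.
+ //
+ //   var setOp = Spark_Connect_SetOperation()
+ //   setOp.leftInput = a
+ //   setOp.rightInput = b
+ //   setOp.setOpType = .union
+ //   setOp.isAll = true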
+
+ enum SetOpType: SwiftProtobuf.Enum, Swift.CaseIterable {
+ typealias RawValue = Int
+ case unspecified // = 0
+ case intersect // = 1
+ case union // = 2
+ case except // = 3
+ case UNRECOGNIZED(Int)
+
+ init() {
+ self = .unspecified
+ }
+
+ init?(rawValue: Int) {
+ switch rawValue {
+ case 0: self = .unspecified
+ case 1: self = .intersect
+ case 2: self = .union
+ case 3: self = .except
+ default: self = .UNRECOGNIZED(rawValue)
+ }
+ }
+
+ var rawValue: Int {
+ switch self {
+ case .unspecified: return 0
+ case .intersect: return 1
+ case .union: return 2
+ case .except: return 3
+ case .UNRECOGNIZED(let i): return i
+ }
+ }
+
+ // The compiler won't synthesize support with the UNRECOGNIZED case.
+ static let allCases: [Spark_Connect_SetOperation.SetOpType] = [
+ .unspecified,
+ .intersect,
+ .union,
+ .except,
+ ]
+
+ }
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[Limit]] that is used to `limit` rows from the input relation.
+struct Spark_Connect_Limit: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) Input relation for a Limit.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) the limit.
+ var limit: Int32 {
+ get {return _storage._limit}
+ set {_uniqueStorage()._limit = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[Offset]] that is used to read rows starting from the `offset` on
+/// the input relation.
+struct Spark_Connect_Offset: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) Input relation for an Offset.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) the offset.
+ var offset: Int32 {
+ get {return _storage._offset}
+ set {_uniqueStorage()._offset = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[Tail]] that is used to fetch `limit` rows from the last of the input relation.
+struct Spark_Connect_Tail: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
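+
+ // Illustrative sketch, not generator output: Limit, Offset, and Tail all pair
+ // an input relation with a single Int32 field. For example, skipping 5 rows
+ // and then taking 10 (`rel` is a hypothetical relation; chaining the two
+ // messages requires wrapping each one back into a Spark_Connect_Relation):
+ //
+ //   var offset = Spark_Connect_Offset()
+ //   offset.input = rel
+ //   offset.offset = 5
+ //   var limit = Spark_Connect_Limit()
+ //   limit.limit = 10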
+
+ /// (Required) Input relation for a Tail.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) the limit.
+ var limit: Int32 {
+ get {return _storage._limit}
+ set {_uniqueStorage()._limit = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[Aggregate]].
+struct Spark_Connect_Aggregate: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) Input relation for a RelationalGroupedDataset.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) How the RelationalGroupedDataset was built.
+ var groupType: Spark_Connect_Aggregate.GroupType {
+ get {return _storage._groupType}
+ set {_uniqueStorage()._groupType = newValue}
+ }
+
+ /// (Required) Expressions for grouping keys
+ var groupingExpressions: [Spark_Connect_Expression] {
+ get {return _storage._groupingExpressions}
+ set {_uniqueStorage()._groupingExpressions = newValue}
+ }
+
+ /// (Required) List of values that will be translated to columns in the output DataFrame.
+ var aggregateExpressions: [Spark_Connect_Expression] {
+ get {return _storage._aggregateExpressions}
+ set {_uniqueStorage()._aggregateExpressions = newValue}
+ }
+
+ /// (Optional) Pivots a column of the current `DataFrame` and performs the specified aggregation.
+ var pivot: Spark_Connect_Aggregate.Pivot {
+ get {return _storage._pivot ?? Spark_Connect_Aggregate.Pivot()}
+ set {_uniqueStorage()._pivot = newValue}
+ }
+ /// Returns true if `pivot` has been explicitly set.
+ var hasPivot: Bool {return _storage._pivot != nil}
+ /// Clears the value of `pivot`. Subsequent reads from it will return its default value.
+ mutating func clearPivot() {_uniqueStorage()._pivot = nil}
+
+ /// (Optional) List of values that will be translated to columns in the output DataFrame.
+ var groupingSets: [Spark_Connect_Aggregate.GroupingSets] { + get {return _storage._groupingSets} + set {_uniqueStorage()._groupingSets = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + enum GroupType: SwiftProtobuf.Enum, Swift.CaseIterable { + typealias RawValue = Int + case unspecified // = 0 + case groupby // = 1 + case rollup // = 2 + case cube // = 3 + case pivot // = 4 + case groupingSets // = 5 + case UNRECOGNIZED(Int) + + init() { + self = .unspecified + } + + init?(rawValue: Int) { + switch rawValue { + case 0: self = .unspecified + case 1: self = .groupby + case 2: self = .rollup + case 3: self = .cube + case 4: self = .pivot + case 5: self = .groupingSets + default: self = .UNRECOGNIZED(rawValue) + } + } + + var rawValue: Int { + switch self { + case .unspecified: return 0 + case .groupby: return 1 + case .rollup: return 2 + case .cube: return 3 + case .pivot: return 4 + case .groupingSets: return 5 + case .UNRECOGNIZED(let i): return i + } + } + + // The compiler won't synthesize support with the UNRECOGNIZED case. + static let allCases: [Spark_Connect_Aggregate.GroupType] = [ + .unspecified, + .groupby, + .rollup, + .cube, + .pivot, + .groupingSets, + ] + + } + + struct Pivot: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The column to pivot + var col: Spark_Connect_Expression { + get {return _col ?? Spark_Connect_Expression()} + set {_col = newValue} + } + /// Returns true if `col` has been explicitly set. + var hasCol: Bool {return self._col != nil} + /// Clears the value of `col`. Subsequent reads from it will return its default value. + mutating func clearCol() {self._col = nil} + + /// (Optional) List of values that will be translated to columns in the output DataFrame. + /// + /// Note that if it is empty, the server side will immediately trigger a job to collect + /// the distinct values of the column. + var values: [Spark_Connect_Expression.Literal] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _col: Spark_Connect_Expression? = nil + } + + struct GroupingSets: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Individual grouping set + var groupingSet: [Spark_Connect_Expression] = [] + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Relation of type [[Sort]]. +struct Spark_Connect_Sort: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Input relation for a Sort. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. 
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) The ordering expressions
+ var order: [Spark_Connect_Expression.SortOrder] {
+ get {return _storage._order}
+ set {_uniqueStorage()._order = newValue}
+ }
+
+ /// (Optional) if this is a global sort.
+ var isGlobal: Bool {
+ get {return _storage._isGlobal ?? false}
+ set {_uniqueStorage()._isGlobal = newValue}
+ }
+ /// Returns true if `isGlobal` has been explicitly set.
+ var hasIsGlobal: Bool {return _storage._isGlobal != nil}
+ /// Clears the value of `isGlobal`. Subsequent reads from it will return its default value.
+ mutating func clearIsGlobal() {_uniqueStorage()._isGlobal = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Drop specified columns.
+struct Spark_Connect_Drop: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The input relation.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Optional) columns to drop.
+ var columns: [Spark_Connect_Expression] {
+ get {return _storage._columns}
+ set {_uniqueStorage()._columns = newValue}
+ }
+
+ /// (Optional) names of columns to drop.
+ var columnNames: [String] {
+ get {return _storage._columnNames}
+ set {_uniqueStorage()._columnNames = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[Deduplicate]] which has duplicate rows removed, considering either only
+/// a subset of the columns or all of the columns.
+struct Spark_Connect_Deduplicate: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) Input relation for a Deduplicate.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Optional) Deduplicate based on a list of column names.
+ ///
+ /// This field cannot be used together with `all_columns_as_keys`.
+ var columnNames: [String] {
+ get {return _storage._columnNames}
+ set {_uniqueStorage()._columnNames = newValue}
+ }
+
+ /// (Optional) Deduplicate based on all the columns of the input relation.
+ ///
+ /// This field cannot be used together with `column_names`.
+ var allColumnsAsKeys: Bool {
+ get {return _storage._allColumnsAsKeys ?? false}
+ set {_uniqueStorage()._allColumnsAsKeys = newValue}
+ }
+ /// Returns true if `allColumnsAsKeys` has been explicitly set.
+ var hasAllColumnsAsKeys: Bool {return _storage._allColumnsAsKeys != nil}
+ /// Clears the value of `allColumnsAsKeys`. Subsequent reads from it will return its default value.
+ mutating func clearAllColumnsAsKeys() {_uniqueStorage()._allColumnsAsKeys = nil}
+
+ /// (Optional) Deduplicate within the time range of watermark.
+ var withinWatermark: Bool {
+ get {return _storage._withinWatermark ?? false}
+ set {_uniqueStorage()._withinWatermark = newValue}
+ }
+ /// Returns true if `withinWatermark` has been explicitly set.
+ var hasWithinWatermark: Bool {return _storage._withinWatermark != nil}
+ /// Clears the value of `withinWatermark`. Subsequent reads from it will return its default value.
+ mutating func clearWithinWatermark() {_uniqueStorage()._withinWatermark = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
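+// Illustrative sketch, not generator output: dropDuplicates over a column
+// subset. `rel` is a hypothetical Spark_Connect_Relation; `columnNames` and
+// `allColumnsAsKeys` are mutually exclusive ways of choosing the key columns.
+//
+//   var dedup = Spark_Connect_Deduplicate()
+//   dedup.input = rel
+//   dedup.columnNames = ["user_id", "day"]
+//   // or instead: dedup.allColumnsAsKeys = true
+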
+/// A relation that does not need to be qualified by name.
+struct Spark_Connect_LocalRelation: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Optional) Local collection data serialized into Arrow IPC streaming format which contains
+ /// the schema of the data.
+ var data: Data {
+ get {return _data ?? Data()}
+ set {_data = newValue}
+ }
+ /// Returns true if `data` has been explicitly set.
+ var hasData: Bool {return self._data != nil}
+ /// Clears the value of `data`. Subsequent reads from it will return its default value.
+ mutating func clearData() {self._data = nil}
+
+ /// (Optional) The schema of local data.
+ /// It should be either a DDL-formatted type string or a JSON string.
+ ///
+ /// The server side will update the column names and data types according to this schema.
+ /// If the 'data' is not provided, then this schema will be required.
+ var schema: String {
+ get {return _schema ?? String()}
+ set {_schema = newValue}
+ }
+ /// Returns true if `schema` has been explicitly set.
+ var hasSchema: Bool {return self._schema != nil}
+ /// Clears the value of `schema`. Subsequent reads from it will return its default value.
+ mutating func clearSchema() {self._schema = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _data: Data? = nil
+ fileprivate var _schema: String? = nil
+}
+
+/// A local relation that has been cached already.
+struct Spark_Connect_CachedLocalRelation: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) A sha-256 hash of the serialized local relation in proto, see LocalRelation.
+ var hash: String = String()
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+}
+
+/// Represents a remote relation that has been cached on the server.
+struct Spark_Connect_CachedRemoteRelation: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) ID of the remote relation (assigned by the service).
+ var relationID: String = String()
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+}
+
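+// Illustrative sketch, not generator output: a schema-only LocalRelation.
+// An Arrow IPC stream (as Foundation.Data) would go into `data` instead;
+// both fields are optional and track explicit presence.
+//
+//   var local = Spark_Connect_LocalRelation()
+//   local.schema = "id BIGINT, name STRING"   // hasSchema becomes true
+//   // local.hasData stays false until an Arrow payload is assigned
+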
+/// Relation of type [[Sample]] that samples a fraction of the dataset.
+struct Spark_Connect_Sample: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) Input relation for a Sample.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) lower bound.
+ var lowerBound: Double {
+ get {return _storage._lowerBound}
+ set {_uniqueStorage()._lowerBound = newValue}
+ }
+
+ /// (Required) upper bound.
+ var upperBound: Double {
+ get {return _storage._upperBound}
+ set {_uniqueStorage()._upperBound = newValue}
+ }
+
+ /// (Optional) Whether to sample with replacement.
+ var withReplacement: Bool {
+ get {return _storage._withReplacement ?? false}
+ set {_uniqueStorage()._withReplacement = newValue}
+ }
+ /// Returns true if `withReplacement` has been explicitly set.
+ var hasWithReplacement: Bool {return _storage._withReplacement != nil}
+ /// Clears the value of `withReplacement`. Subsequent reads from it will return its default value.
+ mutating func clearWithReplacement() {_uniqueStorage()._withReplacement = nil}
+
+ /// (Required) The random seed.
+ /// This field is required to avoid generating mutable dataframes (see SPARK-48184 for details),
+ /// however, still keep it 'optional' here for backward compatibility.
+ var seed: Int64 {
+ get {return _storage._seed ?? 0}
+ set {_uniqueStorage()._seed = newValue}
+ }
+ /// Returns true if `seed` has been explicitly set.
+ var hasSeed: Bool {return _storage._seed != nil}
+ /// Clears the value of `seed`. Subsequent reads from it will return its default value.
+ mutating func clearSeed() {_uniqueStorage()._seed = nil}
+
+ /// (Required) Explicitly sort the underlying plan to make the ordering deterministic or cache it.
+ /// This flag is true when invoking `dataframe.randomSplit` to randomly split the DataFrame with
+ /// the provided weights. Otherwise, it is false.
+ var deterministicOrder: Bool {
+ get {return _storage._deterministicOrder}
+ set {_uniqueStorage()._deterministicOrder = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[Range]] that generates a sequence of integers.
+struct Spark_Connect_Range: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Optional) Default value = 0
+ var start: Int64 {
+ get {return _start ?? 0}
+ set {_start = newValue}
+ }
+ /// Returns true if `start` has been explicitly set.
+ var hasStart: Bool {return self._start != nil}
+ /// Clears the value of `start`. Subsequent reads from it will return its default value.
+ mutating func clearStart() {self._start = nil}
+
+ /// (Required)
+ var end: Int64 = 0
+
+ /// (Required)
+ var step: Int64 = 0
+
+ /// Optional. Default value is assigned by 1) SQL conf "spark.sql.leafNodeDefaultParallelism" if
+ /// it is set, or 2) spark default parallelism.
+ var numPartitions: Int32 {
+ get {return _numPartitions ??
0} + set {_numPartitions = newValue} + } + /// Returns true if `numPartitions` has been explicitly set. + var hasNumPartitions: Bool {return self._numPartitions != nil} + /// Clears the value of `numPartitions`. Subsequent reads from it will return its default value. + mutating func clearNumPartitions() {self._numPartitions = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _start: Int64? = nil + fileprivate var _numPartitions: Int32? = nil +} + +/// Relation alias. +struct Spark_Connect_SubqueryAlias: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation of SubqueryAlias. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) The alias. + var alias: String { + get {return _storage._alias} + set {_uniqueStorage()._alias = newValue} + } + + /// (Optional) Qualifier of the alias. + var qualifier: [String] { + get {return _storage._qualifier} + set {_uniqueStorage()._qualifier = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Relation repartition. +struct Spark_Connect_Repartition: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation of Repartition. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) Must be positive. + var numPartitions: Int32 { + get {return _storage._numPartitions} + set {_uniqueStorage()._numPartitions = newValue} + } + + /// (Optional) Default value is false. + var shuffle: Bool { + get {return _storage._shuffle ?? false} + set {_uniqueStorage()._shuffle = newValue} + } + /// Returns true if `shuffle` has been explicitly set. + var hasShuffle: Bool {return _storage._shuffle != nil} + /// Clears the value of `shuffle`. Subsequent reads from it will return its default value. + mutating func clearShuffle() {_uniqueStorage()._shuffle = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Compose the string representing rows for output. +/// It will invoke 'Dataset.showString' to compute the results. +struct Spark_Connect_ShowString: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. 
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) Number of rows to show.
+ var numRows: Int32 {
+ get {return _storage._numRows}
+ set {_uniqueStorage()._numRows = newValue}
+ }
+
+ /// (Required) If set to more than 0, truncates strings to
+ /// `truncate` characters and all cells will be aligned right.
+ var truncate: Int32 {
+ get {return _storage._truncate}
+ set {_uniqueStorage()._truncate = newValue}
+ }
+
+ /// (Required) If set to true, prints output rows vertically (one line per column value).
+ var vertical: Bool {
+ get {return _storage._vertical}
+ set {_uniqueStorage()._vertical = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Compose the string representing rows for output.
+/// It will invoke 'Dataset.htmlString' to compute the results.
+struct Spark_Connect_HtmlString: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The input relation.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) Number of rows to show.
+ var numRows: Int32 {
+ get {return _storage._numRows}
+ set {_uniqueStorage()._numRows = newValue}
+ }
+
+ /// (Required) If set to more than 0, truncates strings to
+ /// `truncate` characters and all cells will be aligned right.
+ var truncate: Int32 {
+ get {return _storage._truncate}
+ set {_uniqueStorage()._truncate = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Computes specified statistics for numeric and string columns.
+/// It will invoke 'Dataset.summary' (same as 'StatFunctions.summary')
+/// to compute the results.
+struct Spark_Connect_StatSummary: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The input relation.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Optional) Statistics to be computed.
+ ///
+ /// Available statistics are:
+ /// count
+ /// mean
+ /// stddev
+ /// min
+ /// max
+ /// arbitrary approximate percentiles specified as a percentage (e.g.
75%) + /// count_distinct + /// approx_count_distinct + /// + /// If no statistics are given, this function computes 'count', 'mean', 'stddev', 'min', + /// 'approximate quartiles' (percentiles at 25%, 50%, and 75%), and 'max'. + var statistics: [String] { + get {return _storage._statistics} + set {_uniqueStorage()._statistics = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Computes basic statistics for numeric and string columns, including count, mean, stddev, min, +/// and max. If no columns are given, this function computes statistics for all numerical or +/// string columns. +struct Spark_Connect_StatDescribe: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Optional) Columns to compute statistics on. + var cols: [String] { + get {return _storage._cols} + set {_uniqueStorage()._cols = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Computes a pair-wise frequency table of the given columns. Also known as a contingency table. +/// It will invoke 'Dataset.stat.crosstab' (same as 'StatFunctions.crossTabulate') +/// to compute the results. +struct Spark_Connect_StatCrosstab: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) The name of the first column. + /// + /// Distinct items will make the first item of each row. + var col1: String { + get {return _storage._col1} + set {_uniqueStorage()._col1 = newValue} + } + + /// (Required) The name of the second column. + /// + /// Distinct items will make the column names of the DataFrame. + var col2: String { + get {return _storage._col2} + set {_uniqueStorage()._col2 = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Calculate the sample covariance of two numerical columns of a DataFrame. +/// It will invoke 'Dataset.stat.cov' (same as 'StatFunctions.calculateCov') to compute the results. +struct Spark_Connect_StatCov: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) The name of the first column. + var col1: String { + get {return _storage._col1} + set {_uniqueStorage()._col1 = newValue} + } + + /// (Required) The name of the second column. + var col2: String { + get {return _storage._col2} + set {_uniqueStorage()._col2 = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Calculates the correlation of two columns of a DataFrame. Currently only supports the Pearson +/// Correlation Coefficient. It will invoke 'Dataset.stat.corr' (same as +/// 'StatFunctions.pearsonCorrelation') to compute the results. +struct Spark_Connect_StatCorr: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) The name of the first column. + var col1: String { + get {return _storage._col1} + set {_uniqueStorage()._col1 = newValue} + } + + /// (Required) The name of the second column. + var col2: String { + get {return _storage._col2} + set {_uniqueStorage()._col2 = newValue} + } + + /// (Optional) Default value is 'pearson'. + /// + /// Currently only supports the Pearson Correlation Coefficient. + var method: String { + get {return _storage._method ?? String()} + set {_uniqueStorage()._method = newValue} + } + /// Returns true if `method` has been explicitly set. + var hasMethod: Bool {return _storage._method != nil} + /// Clears the value of `method`. Subsequent reads from it will return its default value. + mutating func clearMethod() {_uniqueStorage()._method = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Calculates the approximate quantiles of numerical columns of a DataFrame. +/// It will invoke 'Dataset.stat.approxQuantile' (same as 'StatFunctions.approxQuantile') +/// to compute the results. +struct Spark_Connect_StatApproxQuantile: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? 
Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) The names of the numerical columns.
+ var cols: [String] {
+ get {return _storage._cols}
+ set {_uniqueStorage()._cols = newValue}
+ }
+
+ /// (Required) A list of quantile probabilities.
+ ///
+ /// Each number must belong to [0, 1].
+ /// For example 0 is the minimum, 0.5 is the median, 1 is the maximum.
+ var probabilities: [Double] {
+ get {return _storage._probabilities}
+ set {_uniqueStorage()._probabilities = newValue}
+ }
+
+ /// (Required) The relative target precision to achieve (greater than or equal to 0).
+ ///
+ /// If set to zero, the exact quantiles are computed, which could be very expensive.
+ /// Note that values greater than 1 are accepted but give the same result as 1.
+ var relativeError: Double {
+ get {return _storage._relativeError}
+ set {_uniqueStorage()._relativeError = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Finding frequent items for columns, possibly with false positives.
+/// It will invoke 'Dataset.stat.freqItems' (same as 'StatFunctions.freqItems')
+/// to compute the results.
+struct Spark_Connect_StatFreqItems: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The input relation.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required) The names of the columns to search frequent items in.
+ var cols: [String] {
+ get {return _storage._cols}
+ set {_uniqueStorage()._cols = newValue}
+ }
+
+ /// (Optional) The minimum frequency for an item to be considered `frequent`.
+ /// Should be greater than 1e-4.
+ var support: Double {
+ get {return _storage._support ?? 0}
+ set {_uniqueStorage()._support = newValue}
+ }
+ /// Returns true if `support` has been explicitly set.
+ var hasSupport: Bool {return _storage._support != nil}
+ /// Clears the value of `support`. Subsequent reads from it will return its default value.
+ mutating func clearSupport() {_uniqueStorage()._support = nil}
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Returns a stratified sample without replacement based on the fraction
+/// given on each stratum.
+/// It will invoke 'Dataset.stat.sampleBy' to compute the results.
+struct Spark_Connect_StatSampleBy: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The input relation.
+ var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) The column that defines strata. + var col: Spark_Connect_Expression { + get {return _storage._col ?? Spark_Connect_Expression()} + set {_uniqueStorage()._col = newValue} + } + /// Returns true if `col` has been explicitly set. + var hasCol: Bool {return _storage._col != nil} + /// Clears the value of `col`. Subsequent reads from it will return its default value. + mutating func clearCol() {_uniqueStorage()._col = nil} + + /// (Required) Sampling fraction for each stratum. + /// + /// If a stratum is not specified, we treat its fraction as zero. + var fractions: [Spark_Connect_StatSampleBy.Fraction] { + get {return _storage._fractions} + set {_uniqueStorage()._fractions = newValue} + } + + /// (Required) The random seed. + /// This field is required to avoid generating mutable dataframes (see SPARK-48184 for details), + /// however, still keep it 'optional' here for backward compatibility. + var seed: Int64 { + get {return _storage._seed ?? 0} + set {_uniqueStorage()._seed = newValue} + } + /// Returns true if `seed` has been explicitly set. + var hasSeed: Bool {return _storage._seed != nil} + /// Clears the value of `seed`. Subsequent reads from it will return its default value. + mutating func clearSeed() {_uniqueStorage()._seed = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + struct Fraction: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The stratum. + var stratum: Spark_Connect_Expression.Literal { + get {return _stratum ?? Spark_Connect_Expression.Literal()} + set {_stratum = newValue} + } + /// Returns true if `stratum` has been explicitly set. + var hasStratum: Bool {return self._stratum != nil} + /// Clears the value of `stratum`. Subsequent reads from it will return its default value. + mutating func clearStratum() {self._stratum = nil} + + /// (Required) The fraction value. Must be in [0, 1]. + var fraction: Double = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _stratum: Spark_Connect_Expression.Literal? = nil + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Replaces null values. +/// It will invoke 'Dataset.na.fill' (same as 'DataFrameNaFunctions.fill') to compute the results. +/// Following 3 parameter combinations are supported: +/// 1, 'values' only contains 1 item, 'cols' is empty: +/// replaces null values in all type-compatible columns. +/// 2, 'values' only contains 1 item, 'cols' is not empty: +/// replaces null values in specified columns. +/// 3, 'values' contains more than 1 items, then 'cols' is required to have the same length: +/// replaces each specified column with corresponding value. +struct Spark_Connect_NAFill: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. 
+ var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Optional) Optional list of column names to consider. + var cols: [String] { + get {return _storage._cols} + set {_uniqueStorage()._cols = newValue} + } + + /// (Required) Values to replace null values with. + /// + /// Should contain at least 1 item. + /// Only 4 data types are supported now: bool, long, double, string + var values: [Spark_Connect_Expression.Literal] { + get {return _storage._values} + set {_uniqueStorage()._values = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Drop rows containing null values. +/// It will invoke 'Dataset.na.drop' (same as 'DataFrameNaFunctions.drop') to compute the results. +struct Spark_Connect_NADrop: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Optional) Optional list of column names to consider. + /// + /// When it is empty, all the columns in the input relation will be considered. + var cols: [String] { + get {return _storage._cols} + set {_uniqueStorage()._cols = newValue} + } + + /// (Optional) The minimum number of non-null and non-NaN values required to keep. + /// + /// When not set, it is equivalent to the number of considered columns, which means + /// a row will be kept only if all columns are non-null. + /// + /// 'how' options ('all', 'any') can be easily converted to this field: + /// - 'all' -> set 'min_non_nulls' 1; + /// - 'any' -> keep 'min_non_nulls' unset; + var minNonNulls: Int32 { + get {return _storage._minNonNulls ?? 0} + set {_uniqueStorage()._minNonNulls = newValue} + } + /// Returns true if `minNonNulls` has been explicitly set. + var hasMinNonNulls: Bool {return _storage._minNonNulls != nil} + /// Clears the value of `minNonNulls`. Subsequent reads from it will return its default value. + mutating func clearMinNonNulls() {_uniqueStorage()._minNonNulls = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Replaces old values with the corresponding values. +/// It will invoke 'Dataset.na.replace' (same as 'DataFrameNaFunctions.replace') +/// to compute the results. +struct Spark_Connect_NAReplace: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. 
+ var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Optional) List of column names to consider. + /// + /// When it is empty, all the type-compatible columns in the input relation will be considered. + var cols: [String] { + get {return _storage._cols} + set {_uniqueStorage()._cols = newValue} + } + + /// (Optional) The value replacement mapping. + var replacements: [Spark_Connect_NAReplace.Replacement] { + get {return _storage._replacements} + set {_uniqueStorage()._replacements = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + struct Replacement: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The old value. + /// + /// Only 4 data types are supported now: null, bool, double, string. + var oldValue: Spark_Connect_Expression.Literal { + get {return _oldValue ?? Spark_Connect_Expression.Literal()} + set {_oldValue = newValue} + } + /// Returns true if `oldValue` has been explicitly set. + var hasOldValue: Bool {return self._oldValue != nil} + /// Clears the value of `oldValue`. Subsequent reads from it will return its default value. + mutating func clearOldValue() {self._oldValue = nil} + + /// (Required) The new value. + /// + /// Should be of the same data type with the old value. + var newValue: Spark_Connect_Expression.Literal { + get {return _newValue ?? Spark_Connect_Expression.Literal()} + set {_newValue = newValue} + } + /// Returns true if `newValue` has been explicitly set. + var hasNewValue: Bool {return self._newValue != nil} + /// Clears the value of `newValue`. Subsequent reads from it will return its default value. + mutating func clearNewValue() {self._newValue = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _oldValue: Spark_Connect_Expression.Literal? = nil + fileprivate var _newValue: Spark_Connect_Expression.Literal? = nil + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Rename columns on the input relation by the same length of names. +struct Spark_Connect_ToDF: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation of RenameColumnsBySameLengthNames. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) + /// + /// The number of columns of the input relation must be equal to the length + /// of this field. If this is not true, an exception will be returned. 
+ var columnNames: [String] {
+ get {return _storage._columnNames}
+ set {_uniqueStorage()._columnNames = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Rename columns on the input relation by a map with name to name mapping.
+struct Spark_Connect_WithColumnsRenamed: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The input relation.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Optional)
+ ///
+ /// Renaming column names of input relation from A to B where A is the map key
+ /// and B is the map value. This is a no-op if the schema doesn't contain any A.
+ /// It does not require all input relation column names to be present as keys.
+ /// Duplicated B values are not allowed.
+ ///
+ /// NOTE: This field was marked as deprecated in the .proto file.
+ var renameColumnsMap: Dictionary<String,String> {
+ get {return _storage._renameColumnsMap}
+ set {_uniqueStorage()._renameColumnsMap = newValue}
+ }
+
+ var renames: [Spark_Connect_WithColumnsRenamed.Rename] {
+ get {return _storage._renames}
+ set {_uniqueStorage()._renames = newValue}
+ }
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ struct Rename: Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The existing column name.
+ var colName: String = String()
+
+ /// (Required) The new column name.
+ var newColName: String = String()
+
+ var unknownFields = SwiftProtobuf.UnknownStorage()
+
+ init() {}
+ }
+
+ init() {}
+
+ fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Adding columns or replacing the existing columns that have the same names.
+struct Spark_Connect_WithColumns: @unchecked Sendable {
+ // SwiftProtobuf.Message conformance is added in an extension below. See the
+ // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+ // methods supported on all messages.
+
+ /// (Required) The input relation.
+ var input: Spark_Connect_Relation {
+ get {return _storage._input ?? Spark_Connect_Relation()}
+ set {_uniqueStorage()._input = newValue}
+ }
+ /// Returns true if `input` has been explicitly set.
+ var hasInput: Bool {return _storage._input != nil}
+ /// Clears the value of `input`. Subsequent reads from it will return its default value.
+ mutating func clearInput() {_uniqueStorage()._input = nil}
+
+ /// (Required)
+ ///
+ /// Given a column name, apply the corresponding expression on the column. If the
+ /// column name exists in the input relation, the column is replaced. If the column
+ /// name does not exist in the input relation, it is added as a new column.
+ ///
+ /// Only one name part is expected from each Expression.Alias.
+ ///
+ /// An exception is thrown when duplicated names are present in the mapping.
+ var aliases: [Spark_Connect_Expression.Alias] { + get {return _storage._aliases} + set {_uniqueStorage()._aliases = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +struct Spark_Connect_WithWatermark: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) Name of the column containing event time. + var eventTime: String { + get {return _storage._eventTime} + set {_uniqueStorage()._eventTime = newValue} + } + + /// (Required) + var delayThreshold: String { + get {return _storage._delayThreshold} + set {_uniqueStorage()._delayThreshold = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Specify a hint over a relation. Hint should have a name and optional parameters. +struct Spark_Connect_Hint: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) Hint name. + /// + /// Supported Join hints include BROADCAST, MERGE, SHUFFLE_HASH, SHUFFLE_REPLICATE_NL. + /// + /// Supported partitioning hints include COALESCE, REPARTITION, REPARTITION_BY_RANGE. + var name: String { + get {return _storage._name} + set {_uniqueStorage()._name = newValue} + } + + /// (Optional) Hint parameters. + var parameters: [Spark_Connect_Expression] { + get {return _storage._parameters} + set {_uniqueStorage()._parameters = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +/// Unpivot a DataFrame from wide format to long format, optionally leaving identifier columns set. +struct Spark_Connect_Unpivot: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The input relation. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. 
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Required) Id columns.
+  var ids: [Spark_Connect_Expression] {
+    get {return _storage._ids}
+    set {_uniqueStorage()._ids = newValue}
+  }
+
+  /// (Optional) Value columns to unpivot.
+  var values: Spark_Connect_Unpivot.Values {
+    get {return _storage._values ?? Spark_Connect_Unpivot.Values()}
+    set {_uniqueStorage()._values = newValue}
+  }
+  /// Returns true if `values` has been explicitly set.
+  var hasValues: Bool {return _storage._values != nil}
+  /// Clears the value of `values`. Subsequent reads from it will return its default value.
+  mutating func clearValues() {_uniqueStorage()._values = nil}
+
+  /// (Required) Name of the variable column.
+  var variableColumnName: String {
+    get {return _storage._variableColumnName}
+    set {_uniqueStorage()._variableColumnName = newValue}
+  }
+
+  /// (Required) Name of the value column.
+  var valueColumnName: String {
+    get {return _storage._valueColumnName}
+    set {_uniqueStorage()._valueColumnName = newValue}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  struct Values: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below. See the
+    // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+    // methods supported on all messages.
+
+    var values: [Spark_Connect_Expression] = []
+
+    var unknownFields = SwiftProtobuf.UnknownStorage()
+
+    init() {}
+  }
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Transpose a DataFrame, switching rows to columns.
+/// Transforms the DataFrame such that the values in the specified index column
+/// become the new columns of the DataFrame.
+struct Spark_Connect_Transpose: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The input relation.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Optional) A list of columns that will be treated as the indices.
+  /// Only a single column is supported for now.
+  var indexColumns: [Spark_Connect_Expression] {
+    get {return _storage._indexColumns}
+    set {_uniqueStorage()._indexColumns = newValue}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+struct Spark_Connect_UnresolvedTableValuedFunction: Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Name (or unparsed name for a user-defined function) of the unresolved function.
+  var functionName: String = String()
+
+  /// (Optional) Function arguments. Empty arguments are allowed.
+  var arguments: [Spark_Connect_Expression] = []
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+struct Spark_Connect_ToSchema: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The input relation.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Required) The user-provided schema.
+  ///
+  /// The server side will update the dataframe with this schema.
+  var schema: Spark_Connect_DataType {
+    get {return _storage._schema ?? Spark_Connect_DataType()}
+    set {_uniqueStorage()._schema = newValue}
+  }
+  /// Returns true if `schema` has been explicitly set.
+  var hasSchema: Bool {return _storage._schema != nil}
+  /// Clears the value of `schema`. Subsequent reads from it will return its default value.
+  mutating func clearSchema() {_uniqueStorage()._schema = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+struct Spark_Connect_RepartitionByExpression: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The input relation.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Required) The partitioning expressions.
+  var partitionExprs: [Spark_Connect_Expression] {
+    get {return _storage._partitionExprs}
+    set {_uniqueStorage()._partitionExprs = newValue}
+  }
+
+  /// (Optional) Number of partitions; must be positive.
+  var numPartitions: Int32 {
+    get {return _storage._numPartitions ?? 0}
+    set {_uniqueStorage()._numPartitions = newValue}
+  }
+  /// Returns true if `numPartitions` has been explicitly set.
+  var hasNumPartitions: Bool {return _storage._numPartitions != nil}
+  /// Clears the value of `numPartitions`. Subsequent reads from it will return its default value.
+  mutating func clearNumPartitions() {_uniqueStorage()._numPartitions = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+struct Spark_Connect_MapPartitions: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Input relation for a mapPartitions-equivalent API: mapInPandas, mapInArrow.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
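+  // [Editorial sketch, not part of the generated file] The `has*` / `clear*`
+  // pattern used throughout this file can be exercised as below; the partition
+  // count is an illustrative value.
+  fileprivate static func _repartitionSketch(
+    of input: Spark_Connect_Relation
+  ) -> Spark_Connect_RepartitionByExpression {
+    var repartition = Spark_Connect_RepartitionByExpression()
+    repartition.input = input
+    repartition.numPartitions = 8          // marks the optional field as set
+    assert(repartition.hasNumPartitions)   // true only after explicit assignment
+    return repartition                     // clearNumPartitions() would unset it again
+  }
+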
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Required) Input user-defined function.
+  var `func`: Spark_Connect_CommonInlineUserDefinedFunction {
+    get {return _storage._func ?? Spark_Connect_CommonInlineUserDefinedFunction()}
+    set {_uniqueStorage()._func = newValue}
+  }
+  /// Returns true if ``func`` has been explicitly set.
+  var hasFunc: Bool {return _storage._func != nil}
+  /// Clears the value of ``func``. Subsequent reads from it will return its default value.
+  mutating func clearFunc() {_uniqueStorage()._func = nil}
+
+  /// (Optional) Whether to use barrier mode execution or not.
+  var isBarrier: Bool {
+    get {return _storage._isBarrier ?? false}
+    set {_uniqueStorage()._isBarrier = newValue}
+  }
+  /// Returns true if `isBarrier` has been explicitly set.
+  var hasIsBarrier: Bool {return _storage._isBarrier != nil}
+  /// Clears the value of `isBarrier`. Subsequent reads from it will return its default value.
+  mutating func clearIsBarrier() {_uniqueStorage()._isBarrier = nil}
+
+  /// (Optional) ResourceProfile ID used for stage-level scheduling.
+  var profileID: Int32 {
+    get {return _storage._profileID ?? 0}
+    set {_uniqueStorage()._profileID = newValue}
+  }
+  /// Returns true if `profileID` has been explicitly set.
+  var hasProfileID: Bool {return _storage._profileID != nil}
+  /// Clears the value of `profileID`. Subsequent reads from it will return its default value.
+  mutating func clearProfileID() {_uniqueStorage()._profileID = nil}
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+struct Spark_Connect_GroupMap: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Input relation for Group Map API: apply, applyInPandas.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Required) Expressions for grouping keys.
+  var groupingExpressions: [Spark_Connect_Expression] {
+    get {return _storage._groupingExpressions}
+    set {_uniqueStorage()._groupingExpressions = newValue}
+  }
+
+  /// (Required) Input user-defined function.
+  var `func`: Spark_Connect_CommonInlineUserDefinedFunction {
+    get {return _storage._func ?? Spark_Connect_CommonInlineUserDefinedFunction()}
+    set {_uniqueStorage()._func = newValue}
+  }
+  /// Returns true if ``func`` has been explicitly set.
+  var hasFunc: Bool {return _storage._func != nil}
+  /// Clears the value of ``func``. Subsequent reads from it will return its default value.
+  mutating func clearFunc() {_uniqueStorage()._func = nil}
+
+  /// (Optional) Expressions for sorting. Only used by the Scala Sorted Group Map API.
+  var sortingExpressions: [Spark_Connect_Expression] {
+    get {return _storage._sortingExpressions}
+    set {_uniqueStorage()._sortingExpressions = newValue}
+  }
+
+  /// The fields below are only used by (Flat)MapGroupsWithState.
+  /// (Optional) Input relation for the initial state.
+  var initialInput: Spark_Connect_Relation {
+    get {return _storage._initialInput ??
Spark_Connect_Relation()} + set {_uniqueStorage()._initialInput = newValue} + } + /// Returns true if `initialInput` has been explicitly set. + var hasInitialInput: Bool {return _storage._initialInput != nil} + /// Clears the value of `initialInput`. Subsequent reads from it will return its default value. + mutating func clearInitialInput() {_uniqueStorage()._initialInput = nil} + + /// (Optional) Expressions for grouping keys of the initial state input relation. + var initialGroupingExpressions: [Spark_Connect_Expression] { + get {return _storage._initialGroupingExpressions} + set {_uniqueStorage()._initialGroupingExpressions = newValue} + } + + /// (Optional) True if MapGroupsWithState, false if FlatMapGroupsWithState. + var isMapGroupsWithState: Bool { + get {return _storage._isMapGroupsWithState ?? false} + set {_uniqueStorage()._isMapGroupsWithState = newValue} + } + /// Returns true if `isMapGroupsWithState` has been explicitly set. + var hasIsMapGroupsWithState: Bool {return _storage._isMapGroupsWithState != nil} + /// Clears the value of `isMapGroupsWithState`. Subsequent reads from it will return its default value. + mutating func clearIsMapGroupsWithState() {_uniqueStorage()._isMapGroupsWithState = nil} + + /// (Optional) The output mode of the function. + var outputMode: String { + get {return _storage._outputMode ?? String()} + set {_uniqueStorage()._outputMode = newValue} + } + /// Returns true if `outputMode` has been explicitly set. + var hasOutputMode: Bool {return _storage._outputMode != nil} + /// Clears the value of `outputMode`. Subsequent reads from it will return its default value. + mutating func clearOutputMode() {_uniqueStorage()._outputMode = nil} + + /// (Optional) Timeout configuration for groups that do not receive data for a while. + var timeoutConf: String { + get {return _storage._timeoutConf ?? String()} + set {_uniqueStorage()._timeoutConf = newValue} + } + /// Returns true if `timeoutConf` has been explicitly set. + var hasTimeoutConf: Bool {return _storage._timeoutConf != nil} + /// Clears the value of `timeoutConf`. Subsequent reads from it will return its default value. + mutating func clearTimeoutConf() {_uniqueStorage()._timeoutConf = nil} + + /// (Optional) The schema for the grouped state. + var stateSchema: Spark_Connect_DataType { + get {return _storage._stateSchema ?? Spark_Connect_DataType()} + set {_uniqueStorage()._stateSchema = newValue} + } + /// Returns true if `stateSchema` has been explicitly set. + var hasStateSchema: Bool {return _storage._stateSchema != nil} + /// Clears the value of `stateSchema`. Subsequent reads from it will return its default value. + mutating func clearStateSchema() {_uniqueStorage()._stateSchema = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +struct Spark_Connect_CoGroupMap: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) One input relation for CoGroup Map API - applyInPandas. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. 
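+  // [Editorial sketch, not part of the generated file] Optional scalar fields
+  // such as `outputMode` in GroupMap above are backed by optionals, so `has*`
+  // reports whether a value was explicitly assigned; the mode string is a
+  // hypothetical value.
+  fileprivate static func _groupMapSketch(
+    of input: Spark_Connect_Relation
+  ) -> Spark_Connect_GroupMap {
+    var groupMap = Spark_Connect_GroupMap()
+    groupMap.input = input
+    groupMap.isMapGroupsWithState = true  // selects MapGroupsWithState semantics
+    groupMap.outputMode = "update"        // hypothetical output mode string
+    assert(groupMap.hasOutputMode)        // set once assigned
+    return groupMap
+  }
+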
+ mutating func clearInput() {_uniqueStorage()._input = nil} + + /// Expressions for grouping keys of the first input relation. + var inputGroupingExpressions: [Spark_Connect_Expression] { + get {return _storage._inputGroupingExpressions} + set {_uniqueStorage()._inputGroupingExpressions = newValue} + } + + /// (Required) The other input relation. + var other: Spark_Connect_Relation { + get {return _storage._other ?? Spark_Connect_Relation()} + set {_uniqueStorage()._other = newValue} + } + /// Returns true if `other` has been explicitly set. + var hasOther: Bool {return _storage._other != nil} + /// Clears the value of `other`. Subsequent reads from it will return its default value. + mutating func clearOther() {_uniqueStorage()._other = nil} + + /// Expressions for grouping keys of the other input relation. + var otherGroupingExpressions: [Spark_Connect_Expression] { + get {return _storage._otherGroupingExpressions} + set {_uniqueStorage()._otherGroupingExpressions = newValue} + } + + /// (Required) Input user-defined function. + var `func`: Spark_Connect_CommonInlineUserDefinedFunction { + get {return _storage._func ?? Spark_Connect_CommonInlineUserDefinedFunction()} + set {_uniqueStorage()._func = newValue} + } + /// Returns true if ``func`` has been explicitly set. + var hasFunc: Bool {return _storage._func != nil} + /// Clears the value of ``func``. Subsequent reads from it will return its default value. + mutating func clearFunc() {_uniqueStorage()._func = nil} + + /// (Optional) Expressions for sorting. Only used by Scala Sorted CoGroup Map API. + var inputSortingExpressions: [Spark_Connect_Expression] { + get {return _storage._inputSortingExpressions} + set {_uniqueStorage()._inputSortingExpressions = newValue} + } + + /// (Optional) Expressions for sorting. Only used by Scala Sorted CoGroup Map API. + var otherSortingExpressions: [Spark_Connect_Expression] { + get {return _storage._otherSortingExpressions} + set {_uniqueStorage()._otherSortingExpressions = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +struct Spark_Connect_ApplyInPandasWithState: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Input relation for applyInPandasWithState. + var input: Spark_Connect_Relation { + get {return _storage._input ?? Spark_Connect_Relation()} + set {_uniqueStorage()._input = newValue} + } + /// Returns true if `input` has been explicitly set. + var hasInput: Bool {return _storage._input != nil} + /// Clears the value of `input`. Subsequent reads from it will return its default value. + mutating func clearInput() {_uniqueStorage()._input = nil} + + /// (Required) Expressions for grouping keys. + var groupingExpressions: [Spark_Connect_Expression] { + get {return _storage._groupingExpressions} + set {_uniqueStorage()._groupingExpressions = newValue} + } + + /// (Required) Input user-defined function. + var `func`: Spark_Connect_CommonInlineUserDefinedFunction { + get {return _storage._func ?? Spark_Connect_CommonInlineUserDefinedFunction()} + set {_uniqueStorage()._func = newValue} + } + /// Returns true if ``func`` has been explicitly set. + var hasFunc: Bool {return _storage._func != nil} + /// Clears the value of ``func``. Subsequent reads from it will return its default value. 
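+  // [Editorial sketch, not part of the generated file] A CoGroupMap message
+  // (defined above) pairs two grouped input relations with a single
+  // user-defined function; both relations here are caller-supplied.
+  fileprivate static func _coGroupMapSketch(
+    _ left: Spark_Connect_Relation,
+    _ right: Spark_Connect_Relation
+  ) -> Spark_Connect_CoGroupMap {
+    var coGroup = Spark_Connect_CoGroupMap()
+    coGroup.input = left    // first grouped relation
+    coGroup.other = right   // second grouped relation
+    return coGroup
+  }
+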
+ mutating func clearFunc() {_uniqueStorage()._func = nil} + + /// (Required) Schema for the output DataFrame. + var outputSchema: String { + get {return _storage._outputSchema} + set {_uniqueStorage()._outputSchema = newValue} + } + + /// (Required) Schema for the state. + var stateSchema: String { + get {return _storage._stateSchema} + set {_uniqueStorage()._stateSchema = newValue} + } + + /// (Required) The output mode of the function. + var outputMode: String { + get {return _storage._outputMode} + set {_uniqueStorage()._outputMode = newValue} + } + + /// (Required) Timeout configuration for groups that do not receive data for a while. + var timeoutConf: String { + get {return _storage._timeoutConf} + set {_uniqueStorage()._timeoutConf = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +struct Spark_Connect_CommonInlineUserDefinedTableFunction: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Name of the user-defined table function. + var functionName: String = String() + + /// (Optional) Whether the user-defined table function is deterministic. + var deterministic: Bool = false + + /// (Optional) Function input arguments. Empty arguments are allowed. + var arguments: [Spark_Connect_Expression] = [] + + /// (Required) Type of the user-defined table function. + var function: Spark_Connect_CommonInlineUserDefinedTableFunction.OneOf_Function? = nil + + var pythonUdtf: Spark_Connect_PythonUDTF { + get { + if case .pythonUdtf(let v)? = function {return v} + return Spark_Connect_PythonUDTF() + } + set {function = .pythonUdtf(newValue)} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + /// (Required) Type of the user-defined table function. + enum OneOf_Function: Equatable, Sendable { + case pythonUdtf(Spark_Connect_PythonUDTF) + + } + + init() {} +} + +struct Spark_Connect_PythonUDTF: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Optional) Return type of the Python UDTF. + var returnType: Spark_Connect_DataType { + get {return _returnType ?? Spark_Connect_DataType()} + set {_returnType = newValue} + } + /// Returns true if `returnType` has been explicitly set. + var hasReturnType: Bool {return self._returnType != nil} + /// Clears the value of `returnType`. Subsequent reads from it will return its default value. + mutating func clearReturnType() {self._returnType = nil} + + /// (Required) EvalType of the Python UDTF. + var evalType: Int32 = 0 + + /// (Required) The encoded commands of the Python UDTF. + var command: Data = Data() + + /// (Required) Python version being used in the client. + var pythonVer: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _returnType: Spark_Connect_DataType? = nil +} + +struct Spark_Connect_CommonInlineUserDefinedDataSource: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) Name of the data source. 
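+  // [Editorial sketch, not part of the generated file] For one-of fields such
+  // as `function` in CommonInlineUserDefinedTableFunction above, assigning
+  // through the convenience accessor selects the case; the function name is
+  // illustrative.
+  fileprivate static func _udtfSketch() -> Spark_Connect_CommonInlineUserDefinedTableFunction {
+    var udtf = Spark_Connect_CommonInlineUserDefinedTableFunction()
+    udtf.functionName = "my_udtf"                 // hypothetical UDTF name
+    udtf.pythonUdtf = Spark_Connect_PythonUDTF()  // sets function = .pythonUdtf(...)
+    return udtf
+  }
+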
+  var name: String = String()
+
+  /// (Required) The data source type.
+  var dataSource: Spark_Connect_CommonInlineUserDefinedDataSource.OneOf_DataSource? = nil
+
+  var pythonDataSource: Spark_Connect_PythonDataSource {
+    get {
+      if case .pythonDataSource(let v)? = dataSource {return v}
+      return Spark_Connect_PythonDataSource()
+    }
+    set {dataSource = .pythonDataSource(newValue)}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  /// (Required) The data source type.
+  enum OneOf_DataSource: Equatable, Sendable {
+    case pythonDataSource(Spark_Connect_PythonDataSource)
+
+  }
+
+  init() {}
+}
+
+struct Spark_Connect_PythonDataSource: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The encoded commands of the Python data source.
+  var command: Data = Data()
+
+  /// (Required) Python version being used in the client.
+  var pythonVer: String = String()
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+}
+
+/// Collect arbitrary (named) metrics from a dataset.
+struct Spark_Connect_CollectMetrics: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) The input relation.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Required) Name of the metrics.
+  var name: String {
+    get {return _storage._name}
+    set {_uniqueStorage()._name = newValue}
+  }
+
+  /// (Required) The metric sequence.
+  var metrics: [Spark_Connect_Expression] {
+    get {return _storage._metrics}
+    set {_uniqueStorage()._metrics = newValue}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+struct Spark_Connect_Parse: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Input relation to Parse. The input is expected to have a single text column.
+  var input: Spark_Connect_Relation {
+    get {return _storage._input ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._input = newValue}
+  }
+  /// Returns true if `input` has been explicitly set.
+  var hasInput: Bool {return _storage._input != nil}
+  /// Clears the value of `input`. Subsequent reads from it will return its default value.
+  mutating func clearInput() {_uniqueStorage()._input = nil}
+
+  /// (Required) The expected format of the text.
+  var format: Spark_Connect_Parse.ParseFormat {
+    get {return _storage._format}
+    set {_uniqueStorage()._format = newValue}
+  }
+
+  /// (Optional) DataType representing the schema. If not set, Spark will infer the schema.
+  var schema: Spark_Connect_DataType {
+    get {return _storage._schema ?? Spark_Connect_DataType()}
+    set {_uniqueStorage()._schema = newValue}
+  }
+  /// Returns true if `schema` has been explicitly set.
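+  // [Editorial sketch, not part of the generated file] Parsing a
+  // single-text-column input as CSV; the parser option below is a
+  // hypothetical example.
+  fileprivate static func _parseCsvSketch(
+    of input: Spark_Connect_Relation
+  ) -> Spark_Connect_Parse {
+    var parse = Spark_Connect_Parse()
+    parse.input = input
+    parse.format = .csv                 // ParseFormat case defined below
+    parse.options = ["header": "true"]  // hypothetical CSV parser option
+    return parse
+  }
+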
+  var hasSchema: Bool {return _storage._schema != nil}
+  /// Clears the value of `schema`. Subsequent reads from it will return its default value.
+  mutating func clearSchema() {_uniqueStorage()._schema = nil}
+
+  /// Options for the csv/json parser. The map key is case-insensitive.
+  var options: Dictionary<String,String> {
+    get {return _storage._options}
+    set {_uniqueStorage()._options = newValue}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum ParseFormat: SwiftProtobuf.Enum, Swift.CaseIterable {
+    typealias RawValue = Int
+    case unspecified // = 0
+    case csv // = 1
+    case json // = 2
+    case UNRECOGNIZED(Int)
+
+    init() {
+      self = .unspecified
+    }
+
+    init?(rawValue: Int) {
+      switch rawValue {
+      case 0: self = .unspecified
+      case 1: self = .csv
+      case 2: self = .json
+      default: self = .UNRECOGNIZED(rawValue)
+      }
+    }
+
+    var rawValue: Int {
+      switch self {
+      case .unspecified: return 0
+      case .csv: return 1
+      case .json: return 2
+      case .UNRECOGNIZED(let i): return i
+      }
+    }
+
+    // The compiler won't synthesize support with the UNRECOGNIZED case.
+    static let allCases: [Spark_Connect_Parse.ParseFormat] = [
+      .unspecified,
+      .csv,
+      .json,
+    ]
+
+  }
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[AsOfJoin]].
+///
+/// `left` and `right` must be present.
+struct Spark_Connect_AsOfJoin: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Left input relation for a Join.
+  var left: Spark_Connect_Relation {
+    get {return _storage._left ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._left = newValue}
+  }
+  /// Returns true if `left` has been explicitly set.
+  var hasLeft: Bool {return _storage._left != nil}
+  /// Clears the value of `left`. Subsequent reads from it will return its default value.
+  mutating func clearLeft() {_uniqueStorage()._left = nil}
+
+  /// (Required) Right input relation for a Join.
+  var right: Spark_Connect_Relation {
+    get {return _storage._right ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._right = newValue}
+  }
+  /// Returns true if `right` has been explicitly set.
+  var hasRight: Bool {return _storage._right != nil}
+  /// Clears the value of `right`. Subsequent reads from it will return its default value.
+  mutating func clearRight() {_uniqueStorage()._right = nil}
+
+  /// (Required) Field to join on in the left DataFrame.
+  var leftAsOf: Spark_Connect_Expression {
+    get {return _storage._leftAsOf ?? Spark_Connect_Expression()}
+    set {_uniqueStorage()._leftAsOf = newValue}
+  }
+  /// Returns true if `leftAsOf` has been explicitly set.
+  var hasLeftAsOf: Bool {return _storage._leftAsOf != nil}
+  /// Clears the value of `leftAsOf`. Subsequent reads from it will return its default value.
+  mutating func clearLeftAsOf() {_uniqueStorage()._leftAsOf = nil}
+
+  /// (Required) Field to join on in the right DataFrame.
+  var rightAsOf: Spark_Connect_Expression {
+    get {return _storage._rightAsOf ?? Spark_Connect_Expression()}
+    set {_uniqueStorage()._rightAsOf = newValue}
+  }
+  /// Returns true if `rightAsOf` has been explicitly set.
+  var hasRightAsOf: Bool {return _storage._rightAsOf != nil}
+  /// Clears the value of `rightAsOf`. Subsequent reads from it will return its default value.
+  mutating func clearRightAsOf() {_uniqueStorage()._rightAsOf = nil}
+
+  /// (Optional) The join condition. Could be unset when `using_columns` is utilized.
+  ///
+  /// This field does not co-exist with using_columns.
+  var joinExpr: Spark_Connect_Expression {
+    get {return _storage._joinExpr ?? Spark_Connect_Expression()}
+    set {_uniqueStorage()._joinExpr = newValue}
+  }
+  /// Returns true if `joinExpr` has been explicitly set.
+  var hasJoinExpr: Bool {return _storage._joinExpr != nil}
+  /// Clears the value of `joinExpr`. Subsequent reads from it will return its default value.
+  mutating func clearJoinExpr() {_uniqueStorage()._joinExpr = nil}
+
+  /// (Optional) using_columns provides a list of columns that should be present on both sides
+  /// of the join inputs that this Join will join on. For example, A JOIN B USING col_name is
+  /// equivalent to A JOIN B ON A.col_name = B.col_name.
+  ///
+  /// This field does not co-exist with join_condition.
+  var usingColumns: [String] {
+    get {return _storage._usingColumns}
+    set {_uniqueStorage()._usingColumns = newValue}
+  }
+
+  /// (Required) The join type.
+  var joinType: String {
+    get {return _storage._joinType}
+    set {_uniqueStorage()._joinType = newValue}
+  }
+
+  /// (Optional) The asof tolerance within this range.
+  var tolerance: Spark_Connect_Expression {
+    get {return _storage._tolerance ?? Spark_Connect_Expression()}
+    set {_uniqueStorage()._tolerance = newValue}
+  }
+  /// Returns true if `tolerance` has been explicitly set.
+  var hasTolerance: Bool {return _storage._tolerance != nil}
+  /// Clears the value of `tolerance`. Subsequent reads from it will return its default value.
+  mutating func clearTolerance() {_uniqueStorage()._tolerance = nil}
+
+  /// (Required) Whether to allow matching with the same value or not.
+  var allowExactMatches: Bool {
+    get {return _storage._allowExactMatches}
+    set {_uniqueStorage()._allowExactMatches = newValue}
+  }
+
+  /// (Required) Whether to search for prior, subsequent, or closest matches.
+  var direction: String {
+    get {return _storage._direction}
+    set {_uniqueStorage()._direction = newValue}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  init() {}
+
+  fileprivate var _storage = _StorageClass.defaultInstance
+}
+
+/// Relation of type [[LateralJoin]].
+///
+/// `left` and `right` must be present.
+struct Spark_Connect_LateralJoin: @unchecked Sendable {
+  // SwiftProtobuf.Message conformance is added in an extension below. See the
+  // `Message` and `Message+*Additions` files in the SwiftProtobuf library for
+  // methods supported on all messages.
+
+  /// (Required) Left input relation for a Join.
+  var left: Spark_Connect_Relation {
+    get {return _storage._left ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._left = newValue}
+  }
+  /// Returns true if `left` has been explicitly set.
+  var hasLeft: Bool {return _storage._left != nil}
+  /// Clears the value of `left`. Subsequent reads from it will return its default value.
+  mutating func clearLeft() {_uniqueStorage()._left = nil}
+
+  /// (Required) Right input relation for a Join.
+  var right: Spark_Connect_Relation {
+    get {return _storage._right ?? Spark_Connect_Relation()}
+    set {_uniqueStorage()._right = newValue}
+  }
+  /// Returns true if `right` has been explicitly set.
+  var hasRight: Bool {return _storage._right != nil}
+  /// Clears the value of `right`. Subsequent reads from it will return its default value.
+  mutating func clearRight() {_uniqueStorage()._right = nil}
+
+  /// (Optional) The join condition.
+  var joinCondition: Spark_Connect_Expression {
+    get {return _storage._joinCondition ??
Spark_Connect_Expression()} + set {_uniqueStorage()._joinCondition = newValue} + } + /// Returns true if `joinCondition` has been explicitly set. + var hasJoinCondition: Bool {return _storage._joinCondition != nil} + /// Clears the value of `joinCondition`. Subsequent reads from it will return its default value. + mutating func clearJoinCondition() {_uniqueStorage()._joinCondition = nil} + + /// (Required) The join type. + var joinType: Spark_Connect_Join.JoinType { + get {return _storage._joinType} + set {_uniqueStorage()._joinType = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. + +fileprivate let _protobuf_package = "spark.connect" + +extension Spark_Connect_Relation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Relation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "common"), + 2: .same(proto: "read"), + 3: .same(proto: "project"), + 4: .same(proto: "filter"), + 5: .same(proto: "join"), + 6: .standard(proto: "set_op"), + 7: .same(proto: "sort"), + 8: .same(proto: "limit"), + 9: .same(proto: "aggregate"), + 10: .same(proto: "sql"), + 11: .standard(proto: "local_relation"), + 12: .same(proto: "sample"), + 13: .same(proto: "offset"), + 14: .same(proto: "deduplicate"), + 15: .same(proto: "range"), + 16: .standard(proto: "subquery_alias"), + 17: .same(proto: "repartition"), + 18: .standard(proto: "to_df"), + 19: .standard(proto: "with_columns_renamed"), + 20: .standard(proto: "show_string"), + 21: .same(proto: "drop"), + 22: .same(proto: "tail"), + 23: .standard(proto: "with_columns"), + 24: .same(proto: "hint"), + 25: .same(proto: "unpivot"), + 26: .standard(proto: "to_schema"), + 27: .standard(proto: "repartition_by_expression"), + 28: .standard(proto: "map_partitions"), + 29: .standard(proto: "collect_metrics"), + 30: .same(proto: "parse"), + 31: .standard(proto: "group_map"), + 32: .standard(proto: "co_group_map"), + 33: .standard(proto: "with_watermark"), + 34: .standard(proto: "apply_in_pandas_with_state"), + 35: .standard(proto: "html_string"), + 36: .standard(proto: "cached_local_relation"), + 37: .standard(proto: "cached_remote_relation"), + 38: .standard(proto: "common_inline_user_defined_table_function"), + 39: .standard(proto: "as_of_join"), + 40: .standard(proto: "common_inline_user_defined_data_source"), + 41: .standard(proto: "with_relations"), + 42: .same(proto: "transpose"), + 43: .standard(proto: "unresolved_table_valued_function"), + 44: .standard(proto: "lateral_join"), + 90: .standard(proto: "fill_na"), + 91: .standard(proto: "drop_na"), + 92: .same(proto: "replace"), + 100: .same(proto: "summary"), + 101: .same(proto: "crosstab"), + 102: .same(proto: "describe"), + 103: .same(proto: "cov"), + 104: .same(proto: "corr"), + 105: .standard(proto: "approx_quantile"), + 106: .standard(proto: "freq_items"), + 107: .standard(proto: "sample_by"), + 200: .same(proto: "catalog"), + 300: .standard(proto: "ml_relation"), + 998: .same(proto: "extension"), + 999: .same(proto: "unknown"), + ] + + fileprivate class _StorageClass { + var _common: Spark_Connect_RelationCommon? = nil + var _relType: Spark_Connect_Relation.OneOf_RelType? + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. 
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _common = source._common
+      _relType = source._relType
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._common) }()
+        case 2: try {
+          var v: Spark_Connect_Read?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .read(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .read(v)
+          }
+        }()
+        case 3: try {
+          var v: Spark_Connect_Project?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .project(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .project(v)
+          }
+        }()
+        case 4: try {
+          var v: Spark_Connect_Filter?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .filter(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .filter(v)
+          }
+        }()
+        case 5: try {
+          var v: Spark_Connect_Join?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .join(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .join(v)
+          }
+        }()
+        case 6: try {
+          var v: Spark_Connect_SetOperation?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .setOp(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .setOp(v)
+          }
+        }()
+        case 7: try {
+          var v: Spark_Connect_Sort?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .sort(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .sort(v)
+          }
+        }()
+        case 8: try {
+          var v: Spark_Connect_Limit?
+ var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .limit(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .limit(v) + } + }() + case 9: try { + var v: Spark_Connect_Aggregate? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .aggregate(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .aggregate(v) + } + }() + case 10: try { + var v: Spark_Connect_SQL? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .sql(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .sql(v) + } + }() + case 11: try { + var v: Spark_Connect_LocalRelation? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .localRelation(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .localRelation(v) + } + }() + case 12: try { + var v: Spark_Connect_Sample? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .sample(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .sample(v) + } + }() + case 13: try { + var v: Spark_Connect_Offset? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .offset(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .offset(v) + } + }() + case 14: try { + var v: Spark_Connect_Deduplicate? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .deduplicate(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .deduplicate(v) + } + }() + case 15: try { + var v: Spark_Connect_Range? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .range(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .range(v) + } + }() + case 16: try { + var v: Spark_Connect_SubqueryAlias? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .subqueryAlias(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .subqueryAlias(v) + } + }() + case 17: try { + var v: Spark_Connect_Repartition? 
+ var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .repartition(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .repartition(v) + } + }() + case 18: try { + var v: Spark_Connect_ToDF? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .toDf(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .toDf(v) + } + }() + case 19: try { + var v: Spark_Connect_WithColumnsRenamed? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .withColumnsRenamed(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .withColumnsRenamed(v) + } + }() + case 20: try { + var v: Spark_Connect_ShowString? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .showString(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .showString(v) + } + }() + case 21: try { + var v: Spark_Connect_Drop? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .drop(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .drop(v) + } + }() + case 22: try { + var v: Spark_Connect_Tail? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .tail(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .tail(v) + } + }() + case 23: try { + var v: Spark_Connect_WithColumns? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .withColumns(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .withColumns(v) + } + }() + case 24: try { + var v: Spark_Connect_Hint? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .hint(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .hint(v) + } + }() + case 25: try { + var v: Spark_Connect_Unpivot? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .unpivot(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .unpivot(v) + } + }() + case 26: try { + var v: Spark_Connect_ToSchema? 
+ var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .toSchema(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .toSchema(v) + } + }() + case 27: try { + var v: Spark_Connect_RepartitionByExpression? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .repartitionByExpression(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .repartitionByExpression(v) + } + }() + case 28: try { + var v: Spark_Connect_MapPartitions? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .mapPartitions(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .mapPartitions(v) + } + }() + case 29: try { + var v: Spark_Connect_CollectMetrics? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .collectMetrics(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .collectMetrics(v) + } + }() + case 30: try { + var v: Spark_Connect_Parse? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .parse(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .parse(v) + } + }() + case 31: try { + var v: Spark_Connect_GroupMap? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .groupMap(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .groupMap(v) + } + }() + case 32: try { + var v: Spark_Connect_CoGroupMap? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .coGroupMap(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .coGroupMap(v) + } + }() + case 33: try { + var v: Spark_Connect_WithWatermark? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .withWatermark(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .withWatermark(v) + } + }() + case 34: try { + var v: Spark_Connect_ApplyInPandasWithState? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .applyInPandasWithState(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .applyInPandasWithState(v) + } + }() + case 35: try { + var v: Spark_Connect_HtmlString? 
+ var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .htmlString(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .htmlString(v) + } + }() + case 36: try { + var v: Spark_Connect_CachedLocalRelation? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .cachedLocalRelation(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .cachedLocalRelation(v) + } + }() + case 37: try { + var v: Spark_Connect_CachedRemoteRelation? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .cachedRemoteRelation(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .cachedRemoteRelation(v) + } + }() + case 38: try { + var v: Spark_Connect_CommonInlineUserDefinedTableFunction? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .commonInlineUserDefinedTableFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .commonInlineUserDefinedTableFunction(v) + } + }() + case 39: try { + var v: Spark_Connect_AsOfJoin? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .asOfJoin(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .asOfJoin(v) + } + }() + case 40: try { + var v: Spark_Connect_CommonInlineUserDefinedDataSource? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .commonInlineUserDefinedDataSource(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .commonInlineUserDefinedDataSource(v) + } + }() + case 41: try { + var v: Spark_Connect_WithRelations? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .withRelations(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .withRelations(v) + } + }() + case 42: try { + var v: Spark_Connect_Transpose? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .transpose(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .transpose(v) + } + }() + case 43: try { + var v: Spark_Connect_UnresolvedTableValuedFunction? 
+ var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .unresolvedTableValuedFunction(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .unresolvedTableValuedFunction(v) + } + }() + case 44: try { + var v: Spark_Connect_LateralJoin? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .lateralJoin(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .lateralJoin(v) + } + }() + case 90: try { + var v: Spark_Connect_NAFill? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .fillNa(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .fillNa(v) + } + }() + case 91: try { + var v: Spark_Connect_NADrop? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .dropNa(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .dropNa(v) + } + }() + case 92: try { + var v: Spark_Connect_NAReplace? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .replace(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .replace(v) + } + }() + case 100: try { + var v: Spark_Connect_StatSummary? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .summary(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .summary(v) + } + }() + case 101: try { + var v: Spark_Connect_StatCrosstab? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .crosstab(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .crosstab(v) + } + }() + case 102: try { + var v: Spark_Connect_StatDescribe? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .describe(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .describe(v) + } + }() + case 103: try { + var v: Spark_Connect_StatCov? + var hadOneofValue = false + if let current = _storage._relType { + hadOneofValue = true + if case .cov(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._relType = .cov(v) + } + }() + case 104: try { + var v: Spark_Connect_StatCorr? 
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .corr(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .corr(v)
+          }
+        }()
+        case 105: try {
+          var v: Spark_Connect_StatApproxQuantile?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .approxQuantile(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .approxQuantile(v)
+          }
+        }()
+        case 106: try {
+          var v: Spark_Connect_StatFreqItems?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .freqItems(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .freqItems(v)
+          }
+        }()
+        case 107: try {
+          var v: Spark_Connect_StatSampleBy?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .sampleBy(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .sampleBy(v)
+          }
+        }()
+        case 200: try {
+          var v: Spark_Connect_Catalog?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .catalog(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .catalog(v)
+          }
+        }()
+        case 300: try {
+          var v: Spark_Connect_MlRelation?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .mlRelation(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .mlRelation(v)
+          }
+        }()
+        case 998: try {
+          var v: SwiftProtobuf.Google_Protobuf_Any?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .extension(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .extension(v)
+          }
+        }()
+        case 999: try {
+          var v: Spark_Connect_Unknown?
+          var hadOneofValue = false
+          if let current = _storage._relType {
+            hadOneofValue = true
+            if case .unknown(let m) = current {v = m}
+          }
+          try decoder.decodeSingularMessageField(value: &v)
+          if let v = v {
+            if hadOneofValue {try decoder.handleConflictingOneOf()}
+            _storage._relType = .unknown(v)
+          }
+        }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._common {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      switch _storage._relType {
+      case .read?: try {
+        guard case .read(let v)?
= _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .project?: try { + guard case .project(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .filter?: try { + guard case .filter(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .join?: try { + guard case .join(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .setOp?: try { + guard case .setOp(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .sort?: try { + guard case .sort(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case .limit?: try { + guard case .limit(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + }() + case .aggregate?: try { + guard case .aggregate(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case .sql?: try { + guard case .sql(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 10) + }() + case .localRelation?: try { + guard case .localRelation(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 11) + }() + case .sample?: try { + guard case .sample(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 12) + }() + case .offset?: try { + guard case .offset(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 13) + }() + case .deduplicate?: try { + guard case .deduplicate(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + }() + case .range?: try { + guard case .range(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 15) + }() + case .subqueryAlias?: try { + guard case .subqueryAlias(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 16) + }() + case .repartition?: try { + guard case .repartition(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 17) + }() + case .toDf?: try { + guard case .toDf(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 18) + }() + case .withColumnsRenamed?: try { + guard case .withColumnsRenamed(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 19) + }() + case .showString?: try { + guard case .showString(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 20) + }() + case .drop?: try { + guard case .drop(let v)? 
= _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 21) + }() + case .tail?: try { + guard case .tail(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 22) + }() + case .withColumns?: try { + guard case .withColumns(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 23) + }() + case .hint?: try { + guard case .hint(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 24) + }() + case .unpivot?: try { + guard case .unpivot(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 25) + }() + case .toSchema?: try { + guard case .toSchema(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 26) + }() + case .repartitionByExpression?: try { + guard case .repartitionByExpression(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 27) + }() + case .mapPartitions?: try { + guard case .mapPartitions(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 28) + }() + case .collectMetrics?: try { + guard case .collectMetrics(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 29) + }() + case .parse?: try { + guard case .parse(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 30) + }() + case .groupMap?: try { + guard case .groupMap(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 31) + }() + case .coGroupMap?: try { + guard case .coGroupMap(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 32) + }() + case .withWatermark?: try { + guard case .withWatermark(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 33) + }() + case .applyInPandasWithState?: try { + guard case .applyInPandasWithState(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 34) + }() + case .htmlString?: try { + guard case .htmlString(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 35) + }() + case .cachedLocalRelation?: try { + guard case .cachedLocalRelation(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 36) + }() + case .cachedRemoteRelation?: try { + guard case .cachedRemoteRelation(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 37) + }() + case .commonInlineUserDefinedTableFunction?: try { + guard case .commonInlineUserDefinedTableFunction(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 38) + }() + case .asOfJoin?: try { + guard case .asOfJoin(let v)? 
= _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 39) + }() + case .commonInlineUserDefinedDataSource?: try { + guard case .commonInlineUserDefinedDataSource(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 40) + }() + case .withRelations?: try { + guard case .withRelations(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 41) + }() + case .transpose?: try { + guard case .transpose(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 42) + }() + case .unresolvedTableValuedFunction?: try { + guard case .unresolvedTableValuedFunction(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 43) + }() + case .lateralJoin?: try { + guard case .lateralJoin(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 44) + }() + case .fillNa?: try { + guard case .fillNa(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 90) + }() + case .dropNa?: try { + guard case .dropNa(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 91) + }() + case .replace?: try { + guard case .replace(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 92) + }() + case .summary?: try { + guard case .summary(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 100) + }() + case .crosstab?: try { + guard case .crosstab(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 101) + }() + case .describe?: try { + guard case .describe(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 102) + }() + case .cov?: try { + guard case .cov(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 103) + }() + case .corr?: try { + guard case .corr(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 104) + }() + case .approxQuantile?: try { + guard case .approxQuantile(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 105) + }() + case .freqItems?: try { + guard case .freqItems(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 106) + }() + case .sampleBy?: try { + guard case .sampleBy(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 107) + }() + case .catalog?: try { + guard case .catalog(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 200) + }() + case .mlRelation?: try { + guard case .mlRelation(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 300) + }() + case .extension?: try { + guard case .extension(let v)? 
= _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 998) + }() + case .unknown?: try { + guard case .unknown(let v)? = _storage._relType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 999) + }() + case nil: break + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Relation, rhs: Spark_Connect_Relation) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._common != rhs_storage._common {return false} + if _storage._relType != rhs_storage._relType {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_MlRelation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".MlRelation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "transform"), + 2: .same(proto: "fetch"), + ] + + fileprivate class _StorageClass { + var _mlType: Spark_Connect_MlRelation.OneOf_MlType? + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _mlType = source._mlType + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_MlRelation.Transform? + var hadOneofValue = false + if let current = _storage._mlType { + hadOneofValue = true + if case .transform(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._mlType = .transform(v) + } + }() + case 2: try { + var v: Spark_Connect_Fetch? 
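+        // `MlRelation` is a two-member oneof: `transform` (apply a
+        // transformer or fitted model, via `obj_ref`, to an input relation)
+        // or `fetch` (call methods on a server-side ML object referenced by
+        // `obj_ref`). The generated per-member accessors make the
+        // exclusivity visible; a sketch:
+        //
+        //   var ml = Spark_Connect_MlRelation()
+        //   ml.fetch = Spark_Connect_Fetch()   // mlType is now .fetch
+        //   ml.transform = .init()             // mlType flips to .transform
+        //   // ml.fetch now returns a default-valued Fetch, not the one above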
+ var hadOneofValue = false + if let current = _storage._mlType { + hadOneofValue = true + if case .fetch(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._mlType = .fetch(v) + } + }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch _storage._mlType { + case .transform?: try { + guard case .transform(let v)? = _storage._mlType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .fetch?: try { + guard case .fetch(let v)? = _storage._mlType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case nil: break + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_MlRelation, rhs: Spark_Connect_MlRelation) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._mlType != rhs_storage._mlType {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_MlRelation.Transform: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_MlRelation.protoMessageName + ".Transform" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "obj_ref"), + 2: .same(proto: "transformer"), + 3: .same(proto: "input"), + 4: .same(proto: "params"), + ] + + fileprivate class _StorageClass { + var _operator: Spark_Connect_MlRelation.Transform.OneOf_Operator? + var _input: Spark_Connect_Relation? = nil + var _params: Spark_Connect_MlParams? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _operator = source._operator + _input = source._input + _params = source._params + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_ObjectRef? + var hadOneofValue = false + if let current = _storage._operator { + hadOneofValue = true + if case .objRef(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._operator = .objRef(v) + } + }() + case 2: try { + var v: Spark_Connect_MlOperator? + var hadOneofValue = false + if let current = _storage._operator { + hadOneofValue = true + if case .transformer(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._operator = .transformer(v) + } + }() + case 3: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 4: try { try decoder.decodeSingularMessageField(value: &_storage._params) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch _storage._operator { + case .objRef?: try { + guard case .objRef(let v)? = _storage._operator else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .transformer?: try { + guard case .transformer(let v)? = _storage._operator else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case nil: break + } + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + try { if let v = _storage._params { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_MlRelation.Transform, rhs: Spark_Connect_MlRelation.Transform) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._operator != rhs_storage._operator {return false} + if _storage._input != rhs_storage._input {return false} + if _storage._params != rhs_storage._params {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Fetch: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Fetch" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "obj_ref"), + 2: .same(proto: "methods"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._objRef) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.methods) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._objRef { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.methods.isEmpty { + try visitor.visitRepeatedMessageField(value: self.methods, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Fetch, rhs: Spark_Connect_Fetch) -> Bool { + if lhs._objRef != rhs._objRef {return false} + if lhs.methods != rhs.methods {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Fetch.Method: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Fetch.protoMessageName + ".Method" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "method"), + 2: .same(proto: "args"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.method) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.args) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.method.isEmpty { + try visitor.visitSingularStringField(value: self.method, fieldNumber: 1) + } + if !self.args.isEmpty { + try visitor.visitRepeatedMessageField(value: self.args, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Fetch.Method, rhs: Spark_Connect_Fetch.Method) -> Bool { + if lhs.method != rhs.method {return false} + if lhs.args != rhs.args {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Fetch.Method.Args: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Fetch.Method.protoMessageName + ".Args" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "param"), + 2: .same(proto: "input"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_Expression.Literal? 
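+      // Each `Args` is itself a oneof: a literal `param` or a
+      // relation-valued `input`. For context, a `Fetch` assembled by hand
+      // with the generated accessors (a sketch; "summary" is an assumed
+      // method name, and the `id` field on `ObjectRef` is assumed from
+      // ml_common.proto):
+      //
+      //   var fetch = Spark_Connect_Fetch()
+      //   fetch.objRef.id = "model-42"
+      //   var method = Spark_Connect_Fetch.Method()
+      //   method.method = "summary"
+      //   var arg = Spark_Connect_Fetch.Method.Args()
+      //   arg.param = Spark_Connect_Expression.Literal()
+      //   method.args = [arg]
+      //   fetch.methods = [method]
+      //   let wire = try fetch.serializedData()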
+ var hadOneofValue = false + if let current = self.argsType { + hadOneofValue = true + if case .param(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.argsType = .param(v) + } + }() + case 2: try { + var v: Spark_Connect_Relation? + var hadOneofValue = false + if let current = self.argsType { + hadOneofValue = true + if case .input(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.argsType = .input(v) + } + }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.argsType { + case .param?: try { + guard case .param(let v)? = self.argsType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .input?: try { + guard case .input(let v)? = self.argsType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case nil: break + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Fetch.Method.Args, rhs: Spark_Connect_Fetch.Method.Args) -> Bool { + if lhs.argsType != rhs.argsType {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Unknown: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Unknown" + static let _protobuf_nameMap = SwiftProtobuf._NameMap() + + mutating func decodeMessage(decoder: inout D) throws { + // Load everything into unknown fields + while try decoder.nextFieldNumber() != nil {} + } + + func traverse(visitor: inout V) throws { + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Unknown, rhs: Spark_Connect_Unknown) -> Bool { + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_RelationCommon: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".RelationCommon" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "source_info"), + 2: .standard(proto: "plan_id"), + 3: .same(proto: "origin"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.sourceInfo) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self._planID) }() + case 3: try { try decoder.decodeSingularMessageField(value: &self._origin) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !self.sourceInfo.isEmpty { + try visitor.visitSingularStringField(value: self.sourceInfo, fieldNumber: 1) + } + try { if let v = self._planID { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 2) + } }() + try { if let v = self._origin { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_RelationCommon, rhs: Spark_Connect_RelationCommon) -> Bool { + if lhs.sourceInfo != rhs.sourceInfo {return false} + if lhs._planID != rhs._planID {return false} + if lhs._origin != rhs._origin {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_SQL: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".SQL" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "query"), + 2: .same(proto: "args"), + 3: .standard(proto: "pos_args"), + 4: .standard(proto: "named_arguments"), + 5: .standard(proto: "pos_arguments"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.query) }() + case 2: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: &self.args) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &self.posArgs) }() + case 4: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: &self.namedArguments) }() + case 5: try { try decoder.decodeRepeatedMessageField(value: &self.posArguments) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.query.isEmpty { + try visitor.visitSingularStringField(value: self.query, fieldNumber: 1) + } + if !self.args.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: self.args, fieldNumber: 2) + } + if !self.posArgs.isEmpty { + try visitor.visitRepeatedMessageField(value: self.posArgs, fieldNumber: 3) + } + if !self.namedArguments.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMessageMap.self, value: self.namedArguments, fieldNumber: 4) + } + if !self.posArguments.isEmpty { + try visitor.visitRepeatedMessageField(value: self.posArguments, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_SQL, rhs: Spark_Connect_SQL) -> Bool { + if lhs.query != rhs.query {return false} + if lhs.args != rhs.args {return false} + if lhs.posArgs != rhs.posArgs {return false} + if lhs.namedArguments != rhs.namedArguments {return false} + if lhs.posArguments != rhs.posArguments {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WithRelations: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WithRelations" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "root"), + 2: .same(proto: "references"), + ] + + fileprivate class _StorageClass { + var _root: Spark_Connect_Relation? = nil + var _references: [Spark_Connect_Relation] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _root = source._root + _references = source._references + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._root) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._references) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._root { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._references.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._references, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WithRelations, rhs: Spark_Connect_WithRelations) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._root != rhs_storage._root {return false} + if _storage._references != rhs_storage._references {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Read: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Read" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "named_table"), + 2: .standard(proto: "data_source"), + 3: .standard(proto: "is_streaming"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_Read.NamedTable? + var hadOneofValue = false + if let current = self.readType { + hadOneofValue = true + if case .namedTable(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.readType = .namedTable(v) + } + }() + case 2: try { + var v: Spark_Connect_Read.DataSource? + var hadOneofValue = false + if let current = self.readType { + hadOneofValue = true + if case .dataSource(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + self.readType = .dataSource(v) + } + }() + case 3: try { try decoder.decodeSingularBoolField(value: &self.isStreaming) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch self.readType { + case .namedTable?: try { + guard case .namedTable(let v)? 
= self.readType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .dataSource?: try { + guard case .dataSource(let v)? = self.readType else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case nil: break + } + if self.isStreaming != false { + try visitor.visitSingularBoolField(value: self.isStreaming, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Read, rhs: Spark_Connect_Read) -> Bool { + if lhs.readType != rhs.readType {return false} + if lhs.isStreaming != rhs.isStreaming {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Read.NamedTable: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Read.protoMessageName + ".NamedTable" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "unparsed_identifier"), + 2: .same(proto: "options"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.unparsedIdentifier) }() + case 2: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.options) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.unparsedIdentifier.isEmpty { + try visitor.visitSingularStringField(value: self.unparsedIdentifier, fieldNumber: 1) + } + if !self.options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.options, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Read.NamedTable, rhs: Spark_Connect_Read.NamedTable) -> Bool { + if lhs.unparsedIdentifier != rhs.unparsedIdentifier {return false} + if lhs.options != rhs.options {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Read.DataSource: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Read.protoMessageName + ".DataSource" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "format"), + 2: .same(proto: "schema"), + 3: .same(proto: "options"), + 4: .same(proto: "paths"), + 5: .same(proto: "predicates"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self._format) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._schema) }() + case 3: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &self.options) }() + case 4: try { try decoder.decodeRepeatedStringField(value: &self.paths) }() + case 5: try { try decoder.decodeRepeatedStringField(value: &self.predicates) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._format { + try visitor.visitSingularStringField(value: v, fieldNumber: 1) + } }() + try { if let v = self._schema { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + if !self.options.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: self.options, fieldNumber: 3) + } + if !self.paths.isEmpty { + try visitor.visitRepeatedStringField(value: self.paths, fieldNumber: 4) + } + if !self.predicates.isEmpty { + try visitor.visitRepeatedStringField(value: self.predicates, fieldNumber: 5) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Read.DataSource, rhs: Spark_Connect_Read.DataSource) -> Bool { + if lhs._format != rhs._format {return false} + if lhs._schema != rhs._schema {return false} + if lhs.options != rhs.options {return false} + if lhs.paths != rhs.paths {return false} + if lhs.predicates != rhs.predicates {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Project: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Project" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 3: .same(proto: "expressions"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _expressions: [Spark_Connect_Expression] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _expressions = source._expressions + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._expressions) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._expressions.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._expressions, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Project, rhs: Spark_Connect_Project) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._expressions != rhs_storage._expressions {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Filter: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Filter" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "condition"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _condition: Spark_Connect_Expression? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _condition = source._condition + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._condition) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._condition { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Filter, rhs: Spark_Connect_Filter) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._condition != rhs_storage._condition {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Join: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Join" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "left"), + 2: .same(proto: "right"), + 3: .standard(proto: "join_condition"), + 4: .standard(proto: "join_type"), + 5: .standard(proto: "using_columns"), + 6: .standard(proto: "join_data_type"), + ] + + fileprivate class _StorageClass { + var _left: Spark_Connect_Relation? = nil + var _right: Spark_Connect_Relation? = nil + var _joinCondition: Spark_Connect_Expression? = nil + var _joinType: Spark_Connect_Join.JoinType = .unspecified + var _usingColumns: [String] = [] + var _joinDataType: Spark_Connect_Join.JoinDataType? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
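+    // In condensed form, the copy-on-write scheme these heap-backed
+    // messages share (a sketch around a hypothetical message `M`):
+    //
+    //   struct M {
+    //     fileprivate var _storage = _StorageClass.defaultInstance
+    //     fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    //       if !isKnownUniquelyReferenced(&_storage) {
+    //         _storage = _StorageClass(copying: _storage)  // clone on first write
+    //       }
+    //       return _storage
+    //     }
+    //   }
+    //
+    // Copies of `M` share one allocation until mutated, so passing large
+    // relation trees around stays cheap; only the first write after a copy
+    // pays for the deep copy.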
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _left = source._left + _right = source._right + _joinCondition = source._joinCondition + _joinType = source._joinType + _usingColumns = source._usingColumns + _joinDataType = source._joinDataType + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._left) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._right) }() + case 3: try { try decoder.decodeSingularMessageField(value: &_storage._joinCondition) }() + case 4: try { try decoder.decodeSingularEnumField(value: &_storage._joinType) }() + case 5: try { try decoder.decodeRepeatedStringField(value: &_storage._usingColumns) }() + case 6: try { try decoder.decodeSingularMessageField(value: &_storage._joinDataType) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._left { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._right { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = _storage._joinCondition { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + if _storage._joinType != .unspecified { + try visitor.visitSingularEnumField(value: _storage._joinType, fieldNumber: 4) + } + if !_storage._usingColumns.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._usingColumns, fieldNumber: 5) + } + try { if let v = _storage._joinDataType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Join, rhs: Spark_Connect_Join) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._left != rhs_storage._left {return false} + if _storage._right != rhs_storage._right {return false} + if _storage._joinCondition != rhs_storage._joinCondition {return false} + if _storage._joinType != rhs_storage._joinType {return false} + if _storage._usingColumns != rhs_storage._usingColumns {return false} + if _storage._joinDataType != rhs_storage._joinDataType {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Join.JoinType: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "JOIN_TYPE_UNSPECIFIED"), + 1: .same(proto: "JOIN_TYPE_INNER"), + 2: .same(proto: "JOIN_TYPE_FULL_OUTER"), + 3: .same(proto: "JOIN_TYPE_LEFT_OUTER"), + 4: .same(proto: "JOIN_TYPE_RIGHT_OUTER"), + 5: .same(proto: "JOIN_TYPE_LEFT_ANTI"), + 6: .same(proto: "JOIN_TYPE_LEFT_SEMI"), + 7: .same(proto: "JOIN_TYPE_CROSS"), + ] +} + +extension Spark_Connect_Join.JoinDataType: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Join.protoMessageName + ".JoinDataType" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "is_left_struct"), + 2: .standard(proto: "is_right_struct"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBoolField(value: &self.isLeftStruct) }() + case 2: try { try decoder.decodeSingularBoolField(value: &self.isRightStruct) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.isLeftStruct != false { + try visitor.visitSingularBoolField(value: self.isLeftStruct, fieldNumber: 1) + } + if self.isRightStruct != false { + try visitor.visitSingularBoolField(value: self.isRightStruct, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Join.JoinDataType, rhs: Spark_Connect_Join.JoinDataType) -> Bool { + if lhs.isLeftStruct != rhs.isLeftStruct {return false} + if lhs.isRightStruct != rhs.isRightStruct {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_SetOperation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".SetOperation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "left_input"), + 2: .standard(proto: "right_input"), + 3: .standard(proto: "set_op_type"), + 4: .standard(proto: "is_all"), + 5: .standard(proto: "by_name"), + 6: .standard(proto: "allow_missing_columns"), + ] + + fileprivate class _StorageClass { + var _leftInput: Spark_Connect_Relation? = nil + var _rightInput: Spark_Connect_Relation? = nil + var _setOpType: Spark_Connect_SetOperation.SetOpType = .unspecified + var _isAll: Bool? = nil + var _byName: Bool? = nil + var _allowMissingColumns: Bool? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _leftInput = source._leftInput + _rightInput = source._rightInput + _setOpType = source._setOpType + _isAll = source._isAll + _byName = source._byName + _allowMissingColumns = source._allowMissingColumns + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._leftInput) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._rightInput) }() + case 3: try { try decoder.decodeSingularEnumField(value: &_storage._setOpType) }() + case 4: try { try decoder.decodeSingularBoolField(value: &_storage._isAll) }() + case 5: try { try decoder.decodeSingularBoolField(value: &_storage._byName) }() + case 6: try { try decoder.decodeSingularBoolField(value: &_storage._allowMissingColumns) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._leftInput { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._rightInput { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if _storage._setOpType != .unspecified { + try visitor.visitSingularEnumField(value: _storage._setOpType, fieldNumber: 3) + } + try { if let v = _storage._isAll { + try visitor.visitSingularBoolField(value: v, fieldNumber: 4) + } }() + try { if let v = _storage._byName { + try visitor.visitSingularBoolField(value: v, fieldNumber: 5) + } }() + try { if let v = _storage._allowMissingColumns { + try visitor.visitSingularBoolField(value: v, fieldNumber: 6) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_SetOperation, rhs: Spark_Connect_SetOperation) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._leftInput != rhs_storage._leftInput {return false} + if _storage._rightInput != rhs_storage._rightInput {return false} + if _storage._setOpType != rhs_storage._setOpType {return false} + if _storage._isAll != rhs_storage._isAll {return false} + if _storage._byName != rhs_storage._byName {return false} + if _storage._allowMissingColumns != rhs_storage._allowMissingColumns {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_SetOperation.SetOpType: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "SET_OP_TYPE_UNSPECIFIED"), + 1: .same(proto: "SET_OP_TYPE_INTERSECT"), + 2: .same(proto: "SET_OP_TYPE_UNION"), + 3: .same(proto: "SET_OP_TYPE_EXCEPT"), + ] +} + +extension Spark_Connect_Limit: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Limit" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "limit"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _limit: Int32 = 0 + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. 
+ // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _limit = source._limit + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &_storage._limit) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._limit != 0 { + try visitor.visitSingularInt32Field(value: _storage._limit, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Limit, rhs: Spark_Connect_Limit) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._limit != rhs_storage._limit {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Offset: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Offset" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "offset"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _offset: Int32 = 0 + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
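+ // Storage is heap-backed here most likely because this message embeds Spark_Connect_Relation,
+ // which in turn can embed an Offset again; boxing the fields in a class breaks the recursive
+ // struct cycle while copy-on-write preserves value semantics.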
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _offset = source._offset + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &_storage._offset) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._offset != 0 { + try visitor.visitSingularInt32Field(value: _storage._offset, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Offset, rhs: Spark_Connect_Offset) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._offset != rhs_storage._offset {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Tail: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Tail" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "limit"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _limit: Int32 = 0 + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
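+ // All newly-initialized messages share this single instance; _uniqueStorage() only copies it
+ // once a field is first mutated, so constructing a message allocates no new storage.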
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _limit = source._limit + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &_storage._limit) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._limit != 0 { + try visitor.visitSingularInt32Field(value: _storage._limit, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Tail, rhs: Spark_Connect_Tail) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._limit != rhs_storage._limit {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Aggregate: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Aggregate" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "group_type"), + 3: .standard(proto: "grouping_expressions"), + 4: .standard(proto: "aggregate_expressions"), + 5: .same(proto: "pivot"), + 6: .standard(proto: "grouping_sets"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _groupType: Spark_Connect_Aggregate.GroupType = .unspecified + var _groupingExpressions: [Spark_Connect_Expression] = [] + var _aggregateExpressions: [Spark_Connect_Expression] = [] + var _pivot: Spark_Connect_Aggregate.Pivot? = nil + var _groupingSets: [Spark_Connect_Aggregate.GroupingSets] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
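+ // Defaults mirror proto3 semantics: repeated fields start empty, the enum starts at the zero
+ // case (.unspecified), and message fields start nil; traverse(visitor:) below skips any field
+ // still at its default.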
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _groupType = source._groupType + _groupingExpressions = source._groupingExpressions + _aggregateExpressions = source._aggregateExpressions + _pivot = source._pivot + _groupingSets = source._groupingSets + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularEnumField(value: &_storage._groupType) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._groupingExpressions) }() + case 4: try { try decoder.decodeRepeatedMessageField(value: &_storage._aggregateExpressions) }() + case 5: try { try decoder.decodeSingularMessageField(value: &_storage._pivot) }() + case 6: try { try decoder.decodeRepeatedMessageField(value: &_storage._groupingSets) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._groupType != .unspecified { + try visitor.visitSingularEnumField(value: _storage._groupType, fieldNumber: 2) + } + if !_storage._groupingExpressions.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._groupingExpressions, fieldNumber: 3) + } + if !_storage._aggregateExpressions.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._aggregateExpressions, fieldNumber: 4) + } + try { if let v = _storage._pivot { + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + } }() + if !_storage._groupingSets.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._groupingSets, fieldNumber: 6) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Aggregate, rhs: Spark_Connect_Aggregate) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._groupType != rhs_storage._groupType {return false} + if _storage._groupingExpressions != rhs_storage._groupingExpressions {return false} + if _storage._aggregateExpressions != rhs_storage._aggregateExpressions {return false} + if _storage._pivot != rhs_storage._pivot {return false} + if _storage._groupingSets != rhs_storage._groupingSets {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Aggregate.GroupType: SwiftProtobuf._ProtoNameProviding { + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 0: .same(proto: "GROUP_TYPE_UNSPECIFIED"), + 1: .same(proto: "GROUP_TYPE_GROUPBY"), + 2: .same(proto: "GROUP_TYPE_ROLLUP"), + 3: .same(proto: "GROUP_TYPE_CUBE"), + 4: .same(proto: "GROUP_TYPE_PIVOT"), + 5: .same(proto: "GROUP_TYPE_GROUPING_SETS"), + ] +} + +extension Spark_Connect_Aggregate.Pivot: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Aggregate.protoMessageName + ".Pivot" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "col"), + 2: .same(proto: "values"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &self._col) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &self.values) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._col { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !self.values.isEmpty { + try visitor.visitRepeatedMessageField(value: self.values, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Aggregate.Pivot, rhs: Spark_Connect_Aggregate.Pivot) -> Bool { + if lhs._col != rhs._col {return false} + if lhs.values != rhs.values {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Aggregate.GroupingSets: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_Aggregate.protoMessageName + ".GroupingSets" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "grouping_set"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeRepeatedMessageField(value: &self.groupingSet) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.groupingSet.isEmpty { + try visitor.visitRepeatedMessageField(value: self.groupingSet, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Aggregate.GroupingSets, rhs: Spark_Connect_Aggregate.GroupingSets) -> Bool { + if lhs.groupingSet != rhs.groupingSet {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Sort: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Sort" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "order"), + 3: .standard(proto: "is_global"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _order: [Spark_Connect_Expression.SortOrder] = [] + var _isGlobal: Bool? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
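+ // _isGlobal is Bool? rather than Bool because the field is declared `optional` in the proto
+ // (explicit presence): traverse emits it whenever it is non-nil, even when set to false.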
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _order = source._order + _isGlobal = source._isGlobal + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._order) }() + case 3: try { try decoder.decodeSingularBoolField(value: &_storage._isGlobal) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._order.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._order, fieldNumber: 2) + } + try { if let v = _storage._isGlobal { + try visitor.visitSingularBoolField(value: v, fieldNumber: 3) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Sort, rhs: Spark_Connect_Sort) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._order != rhs_storage._order {return false} + if _storage._isGlobal != rhs_storage._isGlobal {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Drop: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Drop" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "columns"), + 3: .standard(proto: "column_names"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _columns: [Spark_Connect_Expression] = [] + var _columnNames: [String] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
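+ // Field numbers without a case in decodeMessage fall through to `default: break`; the decoder
+ // keeps such data in unknownFields, which traverse re-emits so unknown fields survive a
+ // decode/encode round trip.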
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _columns = source._columns + _columnNames = source._columnNames + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._columns) }() + case 3: try { try decoder.decodeRepeatedStringField(value: &_storage._columnNames) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._columns.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._columns, fieldNumber: 2) + } + if !_storage._columnNames.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._columnNames, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Drop, rhs: Spark_Connect_Drop) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._columns != rhs_storage._columns {return false} + if _storage._columnNames != rhs_storage._columnNames {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Deduplicate: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Deduplicate" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "column_names"), + 3: .standard(proto: "all_columns_as_keys"), + 4: .standard(proto: "within_watermark"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _columnNames: [String] = [] + var _allColumnsAsKeys: Bool? = nil + var _withinWatermark: Bool? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. 
+ // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _columnNames = source._columnNames + _allColumnsAsKeys = source._allColumnsAsKeys + _withinWatermark = source._withinWatermark + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._columnNames) }() + case 3: try { try decoder.decodeSingularBoolField(value: &_storage._allColumnsAsKeys) }() + case 4: try { try decoder.decodeSingularBoolField(value: &_storage._withinWatermark) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._columnNames.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._columnNames, fieldNumber: 2) + } + try { if let v = _storage._allColumnsAsKeys { + try visitor.visitSingularBoolField(value: v, fieldNumber: 3) + } }() + try { if let v = _storage._withinWatermark { + try visitor.visitSingularBoolField(value: v, fieldNumber: 4) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Deduplicate, rhs: Spark_Connect_Deduplicate) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._columnNames != rhs_storage._columnNames {return false} + if _storage._allColumnsAsKeys != rhs_storage._allColumnsAsKeys {return false} + if _storage._withinWatermark != rhs_storage._withinWatermark {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_LocalRelation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".LocalRelation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "data"), + 2: .same(proto: "schema"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularBytesField(value: &self._data) }() + case 2: try { try decoder.decodeSingularStringField(value: &self._schema) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._data { + try visitor.visitSingularBytesField(value: v, fieldNumber: 1) + } }() + try { if let v = self._schema { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_LocalRelation, rhs: Spark_Connect_LocalRelation) -> Bool { + if lhs._data != rhs._data {return false} + if lhs._schema != rhs._schema {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CachedLocalRelation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CachedLocalRelation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 3: .same(proto: "hash"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 3: try { try decoder.decodeSingularStringField(value: &self.hash) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.hash.isEmpty { + try visitor.visitSingularStringField(value: self.hash, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CachedLocalRelation, rhs: Spark_Connect_CachedLocalRelation) -> Bool { + if lhs.hash != rhs.hash {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_CachedRemoteRelation: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".CachedRemoteRelation" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "relation_id"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.relationID) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.relationID.isEmpty { + try visitor.visitSingularStringField(value: self.relationID, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_CachedRemoteRelation, rhs: Spark_Connect_CachedRemoteRelation) -> Bool { + if lhs.relationID != rhs.relationID {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Sample: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Sample" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "lower_bound"), + 3: .standard(proto: "upper_bound"), + 4: .standard(proto: "with_replacement"), + 5: .same(proto: "seed"), + 6: .standard(proto: "deterministic_order"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _lowerBound: Double = 0 + var _upperBound: Double = 0 + var _withReplacement: Bool? = nil + var _seed: Int64? = nil + var _deterministicOrder: Bool = false + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _lowerBound = source._lowerBound + _upperBound = source._upperBound + _withReplacement = source._withReplacement + _seed = source._seed + _deterministicOrder = source._deterministicOrder + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularDoubleField(value: &_storage._lowerBound) }() + case 3: try { try decoder.decodeSingularDoubleField(value: &_storage._upperBound) }() + case 4: try { try decoder.decodeSingularBoolField(value: &_storage._withReplacement) }() + case 5: try { try decoder.decodeSingularInt64Field(value: &_storage._seed) }() + case 6: try { try decoder.decodeSingularBoolField(value: &_storage._deterministicOrder) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._lowerBound.bitPattern != 0 { + try visitor.visitSingularDoubleField(value: _storage._lowerBound, fieldNumber: 2) + } + if _storage._upperBound.bitPattern != 0 { + try visitor.visitSingularDoubleField(value: _storage._upperBound, fieldNumber: 3) + } + try { if let v = _storage._withReplacement { + try visitor.visitSingularBoolField(value: v, fieldNumber: 4) + } }() + try { if let v = _storage._seed { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 5) + } }() + if _storage._deterministicOrder != false { + try visitor.visitSingularBoolField(value: _storage._deterministicOrder, fieldNumber: 6) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Sample, rhs: Spark_Connect_Sample) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._lowerBound != rhs_storage._lowerBound {return false} + if _storage._upperBound != rhs_storage._upperBound {return false} + if _storage._withReplacement != rhs_storage._withReplacement {return false} + if _storage._seed != rhs_storage._seed {return false} + if _storage._deterministicOrder != rhs_storage._deterministicOrder {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Range: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Range" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "start"), + 2: .same(proto: "end"), + 3: .same(proto: "step"), + 4: .standard(proto: "num_partitions"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt64Field(value: &self._start) }() + case 2: try { try decoder.decodeSingularInt64Field(value: &self.end) }() + case 3: try { try decoder.decodeSingularInt64Field(value: &self.step) }() + case 4: try { try decoder.decodeSingularInt32Field(value: &self._numPartitions) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._start { + try visitor.visitSingularInt64Field(value: v, fieldNumber: 1) + } }() + if self.end != 0 { + try visitor.visitSingularInt64Field(value: self.end, fieldNumber: 2) + } + if self.step != 0 { + try visitor.visitSingularInt64Field(value: self.step, fieldNumber: 3) + } + try { if let v = self._numPartitions { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 4) + } }() + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Range, rhs: Spark_Connect_Range) -> Bool { + if lhs._start != rhs._start {return false} + if lhs.end != rhs.end {return false} + if lhs.step != rhs.step {return false} + if lhs._numPartitions != rhs._numPartitions {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_SubqueryAlias: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".SubqueryAlias" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "alias"), + 3: .same(proto: "qualifier"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _alias: String = String() + var _qualifier: [String] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _alias = source._alias + _qualifier = source._qualifier + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._alias) }() + case 3: try { try decoder.decodeRepeatedStringField(value: &_storage._qualifier) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._alias.isEmpty { + try visitor.visitSingularStringField(value: _storage._alias, fieldNumber: 2) + } + if !_storage._qualifier.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._qualifier, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_SubqueryAlias, rhs: Spark_Connect_SubqueryAlias) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._alias != rhs_storage._alias {return false} + if _storage._qualifier != rhs_storage._qualifier {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Repartition: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Repartition" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "num_partitions"), + 3: .same(proto: "shuffle"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _numPartitions: Int32 = 0 + var _shuffle: Bool? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _numPartitions = source._numPartitions + _shuffle = source._shuffle + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &_storage._numPartitions) }() + case 3: try { try decoder.decodeSingularBoolField(value: &_storage._shuffle) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._numPartitions != 0 { + try visitor.visitSingularInt32Field(value: _storage._numPartitions, fieldNumber: 2) + } + try { if let v = _storage._shuffle { + try visitor.visitSingularBoolField(value: v, fieldNumber: 3) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Repartition, rhs: Spark_Connect_Repartition) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._numPartitions != rhs_storage._numPartitions {return false} + if _storage._shuffle != rhs_storage._shuffle {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_ShowString: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".ShowString" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "num_rows"), + 3: .same(proto: "truncate"), + 4: .same(proto: "vertical"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _numRows: Int32 = 0 + var _truncate: Int32 = 0 + var _vertical: Bool = false + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
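+ // The withExtendedLifetime wrappers in decodeMessage/traverse appear to pin this storage
+ // reference for the duration of the call, presumably so ARC cannot release it while the
+ // closure is still reading or writing its fields.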
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _numRows = source._numRows + _truncate = source._truncate + _vertical = source._vertical + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &_storage._numRows) }() + case 3: try { try decoder.decodeSingularInt32Field(value: &_storage._truncate) }() + case 4: try { try decoder.decodeSingularBoolField(value: &_storage._vertical) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._numRows != 0 { + try visitor.visitSingularInt32Field(value: _storage._numRows, fieldNumber: 2) + } + if _storage._truncate != 0 { + try visitor.visitSingularInt32Field(value: _storage._truncate, fieldNumber: 3) + } + if _storage._vertical != false { + try visitor.visitSingularBoolField(value: _storage._vertical, fieldNumber: 4) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ShowString, rhs: Spark_Connect_ShowString) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._numRows != rhs_storage._numRows {return false} + if _storage._truncate != rhs_storage._truncate {return false} + if _storage._vertical != rhs_storage._vertical {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_HtmlString: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".HtmlString" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "num_rows"), + 3: .same(proto: "truncate"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _numRows: Int32 = 0 + var _truncate: Int32 = 0 + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. 
+ // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _numRows = source._numRows + _truncate = source._truncate + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &_storage._numRows) }() + case 3: try { try decoder.decodeSingularInt32Field(value: &_storage._truncate) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if _storage._numRows != 0 { + try visitor.visitSingularInt32Field(value: _storage._numRows, fieldNumber: 2) + } + if _storage._truncate != 0 { + try visitor.visitSingularInt32Field(value: _storage._truncate, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_HtmlString, rhs: Spark_Connect_HtmlString) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._numRows != rhs_storage._numRows {return false} + if _storage._truncate != rhs_storage._truncate {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StatSummary: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StatSummary" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "statistics"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _statistics: [String] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. 
+ // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _statistics = source._statistics + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._statistics) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._statistics.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._statistics, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StatSummary, rhs: Spark_Connect_StatSummary) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._statistics != rhs_storage._statistics {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StatDescribe: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StatDescribe" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "cols"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _cols: [String] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
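+ // The == below short-circuits on `lhs._storage !== rhs._storage`: two values sharing one
+ // copy-on-write box need no field-by-field comparison; unknownFields is still compared
+ // separately since it lives outside the storage class.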
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _cols = source._cols + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._cols) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._cols.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._cols, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StatDescribe, rhs: Spark_Connect_StatDescribe) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._cols != rhs_storage._cols {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StatCrosstab: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StatCrosstab" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "col1"), + 3: .same(proto: "col2"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _col1: String = String() + var _col2: String = String() + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _col1 = source._col1 + _col2 = source._col2 + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._col1) }() + case 3: try { try decoder.decodeSingularStringField(value: &_storage._col2) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._col1.isEmpty { + try visitor.visitSingularStringField(value: _storage._col1, fieldNumber: 2) + } + if !_storage._col2.isEmpty { + try visitor.visitSingularStringField(value: _storage._col2, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StatCrosstab, rhs: Spark_Connect_StatCrosstab) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._col1 != rhs_storage._col1 {return false} + if _storage._col2 != rhs_storage._col2 {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StatCov: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StatCov" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "col1"), + 3: .same(proto: "col2"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _col1: String = String() + var _col2: String = String() + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _col1 = source._col1 + _col2 = source._col2 + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._col1) }() + case 3: try { try decoder.decodeSingularStringField(value: &_storage._col2) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._col1.isEmpty { + try visitor.visitSingularStringField(value: _storage._col1, fieldNumber: 2) + } + if !_storage._col2.isEmpty { + try visitor.visitSingularStringField(value: _storage._col2, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_StatCov, rhs: Spark_Connect_StatCov) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._col1 != rhs_storage._col1 {return false} + if _storage._col2 != rhs_storage._col2 {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_StatCorr: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".StatCorr" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "col1"), + 3: .same(proto: "col2"), + 4: .same(proto: "method"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _col1: String = String() + var _col2: String = String() + var _method: String? = nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _col1 = source._col1
+      _col2 = source._col2
+      _method = source._method
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeSingularStringField(value: &_storage._col1) }()
+        case 3: try { try decoder.decodeSingularStringField(value: &_storage._col2) }()
+        case 4: try { try decoder.decodeSingularStringField(value: &_storage._method) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._col1.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._col1, fieldNumber: 2)
+      }
+      if !_storage._col2.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._col2, fieldNumber: 3)
+      }
+      try { if let v = _storage._method {
+        try visitor.visitSingularStringField(value: v, fieldNumber: 4)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_StatCorr, rhs: Spark_Connect_StatCorr) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._col1 != rhs_storage._col1 {return false}
+        if _storage._col2 != rhs_storage._col2 {return false}
+        if _storage._method != rhs_storage._method {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_StatApproxQuantile: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".StatApproxQuantile"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "cols"),
+    3: .same(proto: "probabilities"),
+    4: .standard(proto: "relative_error"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _cols: [String] = []
+    var _probabilities: [Double] = []
+    var _relativeError: Double = 0
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _cols = source._cols
+      _probabilities = source._probabilities
+      _relativeError = source._relativeError
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._cols) }()
+        case 3: try { try decoder.decodeRepeatedDoubleField(value: &_storage._probabilities) }()
+        case 4: try { try decoder.decodeSingularDoubleField(value: &_storage._relativeError) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._cols.isEmpty {
+        try visitor.visitRepeatedStringField(value: _storage._cols, fieldNumber: 2)
+      }
+      if !_storage._probabilities.isEmpty {
+        try visitor.visitPackedDoubleField(value: _storage._probabilities, fieldNumber: 3)
+      }
+      if _storage._relativeError.bitPattern != 0 {
+        try visitor.visitSingularDoubleField(value: _storage._relativeError, fieldNumber: 4)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_StatApproxQuantile, rhs: Spark_Connect_StatApproxQuantile) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._cols != rhs_storage._cols {return false}
+        if _storage._probabilities != rhs_storage._probabilities {return false}
+        if _storage._relativeError != rhs_storage._relativeError {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_StatFreqItems: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".StatFreqItems"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "cols"),
+    3: .same(proto: "support"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _cols: [String] = []
+    var _support: Double? = nil
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _cols = source._cols
+      _support = source._support
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._cols) }()
+        case 3: try { try decoder.decodeSingularDoubleField(value: &_storage._support) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._cols.isEmpty {
+        try visitor.visitRepeatedStringField(value: _storage._cols, fieldNumber: 2)
+      }
+      try { if let v = _storage._support {
+        try visitor.visitSingularDoubleField(value: v, fieldNumber: 3)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_StatFreqItems, rhs: Spark_Connect_StatFreqItems) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._cols != rhs_storage._cols {return false}
+        if _storage._support != rhs_storage._support {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_StatSampleBy: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".StatSampleBy"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "col"),
+    3: .same(proto: "fractions"),
+    5: .same(proto: "seed"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _col: Spark_Connect_Expression? = nil
+    var _fractions: [Spark_Connect_StatSampleBy.Fraction] = []
+    var _seed: Int64? = nil
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _col = source._col
+      _fractions = source._fractions
+      _seed = source._seed
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._col) }()
+        case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._fractions) }()
+        case 5: try { try decoder.decodeSingularInt64Field(value: &_storage._seed) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      try { if let v = _storage._col {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+      } }()
+      if !_storage._fractions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._fractions, fieldNumber: 3)
+      }
+      try { if let v = _storage._seed {
+        try visitor.visitSingularInt64Field(value: v, fieldNumber: 5)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_StatSampleBy, rhs: Spark_Connect_StatSampleBy) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._col != rhs_storage._col {return false}
+        if _storage._fractions != rhs_storage._fractions {return false}
+        if _storage._seed != rhs_storage._seed {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_StatSampleBy.Fraction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_StatSampleBy.protoMessageName + ".Fraction"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "stratum"),
+    2: .same(proto: "fraction"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._stratum) }()
+      case 2: try { try decoder.decodeSingularDoubleField(value: &self.fraction) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._stratum {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    if self.fraction.bitPattern != 0 {
+      try visitor.visitSingularDoubleField(value: self.fraction, fieldNumber: 2)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_StatSampleBy.Fraction, rhs: Spark_Connect_StatSampleBy.Fraction) -> Bool {
+    if lhs._stratum != rhs._stratum {return false}
+    if lhs.fraction != rhs.fraction {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_NAFill: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".NAFill"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "cols"),
+    3: .same(proto: "values"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _cols: [String] = []
+    var _values: [Spark_Connect_Expression.Literal] = []
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _cols = source._cols
+      _values = source._values
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._cols) }()
+        case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._values) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._cols.isEmpty {
+        try visitor.visitRepeatedStringField(value: _storage._cols, fieldNumber: 2)
+      }
+      if !_storage._values.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._values, fieldNumber: 3)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_NAFill, rhs: Spark_Connect_NAFill) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._cols != rhs_storage._cols {return false}
+        if _storage._values != rhs_storage._values {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_NADrop: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".NADrop"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "cols"),
+    3: .standard(proto: "min_non_nulls"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _cols: [String] = []
+    var _minNonNulls: Int32? = nil
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _cols = source._cols
+      _minNonNulls = source._minNonNulls
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._cols) }()
+        case 3: try { try decoder.decodeSingularInt32Field(value: &_storage._minNonNulls) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._cols.isEmpty {
+        try visitor.visitRepeatedStringField(value: _storage._cols, fieldNumber: 2)
+      }
+      try { if let v = _storage._minNonNulls {
+        try visitor.visitSingularInt32Field(value: v, fieldNumber: 3)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_NADrop, rhs: Spark_Connect_NADrop) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._cols != rhs_storage._cols {return false}
+        if _storage._minNonNulls != rhs_storage._minNonNulls {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_NAReplace: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".NAReplace"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "cols"),
+    3: .same(proto: "replacements"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _cols: [String] = []
+    var _replacements: [Spark_Connect_NAReplace.Replacement] = []
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _cols = source._cols
+      _replacements = source._replacements
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._cols) }()
+        case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._replacements) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._cols.isEmpty {
+        try visitor.visitRepeatedStringField(value: _storage._cols, fieldNumber: 2)
+      }
+      if !_storage._replacements.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._replacements, fieldNumber: 3)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_NAReplace, rhs: Spark_Connect_NAReplace) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._cols != rhs_storage._cols {return false}
+        if _storage._replacements != rhs_storage._replacements {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_NAReplace.Replacement: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_NAReplace.protoMessageName + ".Replacement"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "old_value"),
+    2: .standard(proto: "new_value"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._oldValue) }()
+      case 2: try { try decoder.decodeSingularMessageField(value: &self._newValue) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._oldValue {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    try { if let v = self._newValue {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_NAReplace.Replacement, rhs: Spark_Connect_NAReplace.Replacement) -> Bool {
+    if lhs._oldValue != rhs._oldValue {return false}
+    if lhs._newValue != rhs._newValue {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_ToDF: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".ToDF"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .standard(proto: "column_names"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _columnNames: [String] = []
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+ // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _columnNames = source._columnNames + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeRepeatedStringField(value: &_storage._columnNames) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._columnNames.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._columnNames, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_ToDF, rhs: Spark_Connect_ToDF) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._columnNames != rhs_storage._columnNames {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WithColumnsRenamed: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WithColumnsRenamed" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "rename_columns_map"), + 3: .same(proto: "renames"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _renameColumnsMap: Dictionary = [:] + var _renames: [Spark_Connect_WithColumnsRenamed.Rename] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _renameColumnsMap = source._renameColumnsMap + _renames = source._renames + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: &_storage._renameColumnsMap) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._renames) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._renameColumnsMap.isEmpty { + try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap.self, value: _storage._renameColumnsMap, fieldNumber: 2) + } + if !_storage._renames.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._renames, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WithColumnsRenamed, rhs: Spark_Connect_WithColumnsRenamed) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._renameColumnsMap != rhs_storage._renameColumnsMap {return false} + if _storage._renames != rhs_storage._renames {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WithColumnsRenamed.Rename: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_WithColumnsRenamed.protoMessageName + ".Rename" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "col_name"), + 2: .standard(proto: "new_col_name"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.colName) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.newColName) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if !self.colName.isEmpty { + try visitor.visitSingularStringField(value: self.colName, fieldNumber: 1) + } + if !self.newColName.isEmpty { + try visitor.visitSingularStringField(value: self.newColName, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WithColumnsRenamed.Rename, rhs: Spark_Connect_WithColumnsRenamed.Rename) -> Bool { + if lhs.colName != rhs.colName {return false} + if lhs.newColName != rhs.newColName {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WithColumns: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WithColumns" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "aliases"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _aliases: [Spark_Connect_Expression.Alias] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _aliases = source._aliases + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._aliases) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._aliases.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._aliases, fieldNumber: 2) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WithColumns, rhs: Spark_Connect_WithColumns) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._aliases != rhs_storage._aliases {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_WithWatermark: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".WithWatermark" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .standard(proto: "event_time"), + 3: .standard(proto: "delay_threshold"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _eventTime: String = String() + var _delayThreshold: String = String() + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _eventTime = source._eventTime + _delayThreshold = source._delayThreshold + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._eventTime) }() + case 3: try { try decoder.decodeSingularStringField(value: &_storage._delayThreshold) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._eventTime.isEmpty { + try visitor.visitSingularStringField(value: _storage._eventTime, fieldNumber: 2) + } + if !_storage._delayThreshold.isEmpty { + try visitor.visitSingularStringField(value: _storage._delayThreshold, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_WithWatermark, rhs: Spark_Connect_WithWatermark) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._eventTime != rhs_storage._eventTime {return false} + if _storage._delayThreshold != rhs_storage._delayThreshold {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Hint: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Hint" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "name"), + 3: .same(proto: "parameters"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _name: String = String() + var _parameters: [Spark_Connect_Expression] = [] + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _name = source._name + _parameters = source._parameters + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._name) }() + case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._parameters) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._input { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + if !_storage._name.isEmpty { + try visitor.visitSingularStringField(value: _storage._name, fieldNumber: 2) + } + if !_storage._parameters.isEmpty { + try visitor.visitRepeatedMessageField(value: _storage._parameters, fieldNumber: 3) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_Hint, rhs: Spark_Connect_Hint) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._input != rhs_storage._input {return false} + if _storage._name != rhs_storage._name {return false} + if _storage._parameters != rhs_storage._parameters {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_Unpivot: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".Unpivot" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "input"), + 2: .same(proto: "ids"), + 3: .same(proto: "values"), + 4: .standard(proto: "variable_column_name"), + 5: .standard(proto: "value_column_name"), + ] + + fileprivate class _StorageClass { + var _input: Spark_Connect_Relation? = nil + var _ids: [Spark_Connect_Expression] = [] + var _values: Spark_Connect_Unpivot.Values? = nil + var _variableColumnName: String = String() + var _valueColumnName: String = String() + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _input = source._input + _ids = source._ids + _values = source._values + _variableColumnName = source._variableColumnName + _valueColumnName = source._valueColumnName + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._ids) }()
+        case 3: try { try decoder.decodeSingularMessageField(value: &_storage._values) }()
+        case 4: try { try decoder.decodeSingularStringField(value: &_storage._variableColumnName) }()
+        case 5: try { try decoder.decodeSingularStringField(value: &_storage._valueColumnName) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._ids.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._ids, fieldNumber: 2)
+      }
+      try { if let v = _storage._values {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+      } }()
+      if !_storage._variableColumnName.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._variableColumnName, fieldNumber: 4)
+      }
+      if !_storage._valueColumnName.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._valueColumnName, fieldNumber: 5)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_Unpivot, rhs: Spark_Connect_Unpivot) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._ids != rhs_storage._ids {return false}
+        if _storage._values != rhs_storage._values {return false}
+        if _storage._variableColumnName != rhs_storage._variableColumnName {return false}
+        if _storage._valueColumnName != rhs_storage._valueColumnName {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_Unpivot.Values: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_Unpivot.protoMessageName + ".Values"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "values"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeRepeatedMessageField(value: &self.values) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.values.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.values, fieldNumber: 1)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_Unpivot.Values, rhs: Spark_Connect_Unpivot.Values) -> Bool {
+    if lhs.values != rhs.values {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_Transpose: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".Transpose"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .standard(proto: "index_columns"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _indexColumns: [Spark_Connect_Expression] = []
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _indexColumns = source._indexColumns
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._indexColumns) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._indexColumns.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._indexColumns, fieldNumber: 2)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_Transpose, rhs: Spark_Connect_Transpose) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._indexColumns != rhs_storage._indexColumns {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_UnresolvedTableValuedFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".UnresolvedTableValuedFunction"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "function_name"),
+    2: .same(proto: "arguments"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.functionName) }()
+      case 2: try { try decoder.decodeRepeatedMessageField(value: &self.arguments) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.functionName.isEmpty {
+      try visitor.visitSingularStringField(value: self.functionName, fieldNumber: 1)
+    }
+    if !self.arguments.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.arguments, fieldNumber: 2)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_UnresolvedTableValuedFunction, rhs: Spark_Connect_UnresolvedTableValuedFunction) -> Bool {
+    if lhs.functionName != rhs.functionName {return false}
+    if lhs.arguments != rhs.arguments {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_ToSchema: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".ToSchema"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "schema"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _schema: Spark_Connect_DataType? = nil
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
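// An aside on what this CoW storage buys users of these value types: a
// minimal sketch, assuming only the standard SwiftProtobuf-generated
// accessors for Spark_Connect_ToSchema (nothing below is part of this diff).
//
//     var a = Spark_Connect_ToSchema()
//     let b = a                            // the _StorageClass reference is shared
//     a.schema = Spark_Connect_DataType()  // first mutation calls _uniqueStorage(),
//                                          // copying the storage; `b` is unaffected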
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _schema = source._schema
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._schema) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      try { if let v = _storage._schema {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_ToSchema, rhs: Spark_Connect_ToSchema) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._schema != rhs_storage._schema {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_RepartitionByExpression: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".RepartitionByExpression"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .standard(proto: "partition_exprs"),
+    3: .standard(proto: "num_partitions"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _partitionExprs: [Spark_Connect_Expression] = []
+    var _numPartitions: Int32? = nil
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _partitionExprs = source._partitionExprs
+      _numPartitions = source._numPartitions
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._partitionExprs) }()
+        case 3: try { try decoder.decodeSingularInt32Field(value: &_storage._numPartitions) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._partitionExprs.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._partitionExprs, fieldNumber: 2)
+      }
+      try { if let v = _storage._numPartitions {
+        try visitor.visitSingularInt32Field(value: v, fieldNumber: 3)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_RepartitionByExpression, rhs: Spark_Connect_RepartitionByExpression) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._partitionExprs != rhs_storage._partitionExprs {return false}
+        if _storage._numPartitions != rhs_storage._numPartitions {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_MapPartitions: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".MapPartitions"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "func"),
+    3: .standard(proto: "is_barrier"),
+    4: .standard(proto: "profile_id"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _func: Spark_Connect_CommonInlineUserDefinedFunction? = nil
+    var _isBarrier: Bool? = nil
+    var _profileID: Int32? = nil
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _func = source._func
+      _isBarrier = source._isBarrier
+      _profileID = source._profileID
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._func) }()
+        case 3: try { try decoder.decodeSingularBoolField(value: &_storage._isBarrier) }()
+        case 4: try { try decoder.decodeSingularInt32Field(value: &_storage._profileID) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      try { if let v = _storage._func {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+      } }()
+      try { if let v = _storage._isBarrier {
+        try visitor.visitSingularBoolField(value: v, fieldNumber: 3)
+      } }()
+      try { if let v = _storage._profileID {
+        try visitor.visitSingularInt32Field(value: v, fieldNumber: 4)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_MapPartitions, rhs: Spark_Connect_MapPartitions) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._func != rhs_storage._func {return false}
+        if _storage._isBarrier != rhs_storage._isBarrier {return false}
+        if _storage._profileID != rhs_storage._profileID {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_GroupMap: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".GroupMap"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .standard(proto: "grouping_expressions"),
+    3: .same(proto: "func"),
+    4: .standard(proto: "sorting_expressions"),
+    5: .standard(proto: "initial_input"),
+    6: .standard(proto: "initial_grouping_expressions"),
+    7: .standard(proto: "is_map_groups_with_state"),
+    8: .standard(proto: "output_mode"),
+    9: .standard(proto: "timeout_conf"),
+    10: .standard(proto: "state_schema"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _groupingExpressions: [Spark_Connect_Expression] = []
+    var _func: Spark_Connect_CommonInlineUserDefinedFunction? = nil
+    var _sortingExpressions: [Spark_Connect_Expression] = []
+    var _initialInput: Spark_Connect_Relation? = nil
+    var _initialGroupingExpressions: [Spark_Connect_Expression] = []
+    var _isMapGroupsWithState: Bool? = nil
+    var _outputMode: String? = nil
+    var _timeoutConf: String? = nil
+    var _stateSchema: Spark_Connect_DataType? = nil
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
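// The optional storage vars above (`_isMapGroupsWithState: Bool?`,
// `_outputMode: String?`, ...) back proto3 `optional` fields with explicit
// presence. A sketch of the expected surface API (standard SwiftProtobuf
// has-/clear- naming is assumed; it is not shown in this diff):
//
//     var g = Spark_Connect_GroupMap()
//     _ = g.hasIsMapGroupsWithState    // false: unset, field 7 not serialized
//     g.isMapGroupsWithState = false   // explicit false: presence is recorded,
//                                      // so traverse below emits field 7
//     g.clearIsMapGroupsWithState()    // back to absent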
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _groupingExpressions = source._groupingExpressions
+      _func = source._func
+      _sortingExpressions = source._sortingExpressions
+      _initialInput = source._initialInput
+      _initialGroupingExpressions = source._initialGroupingExpressions
+      _isMapGroupsWithState = source._isMapGroupsWithState
+      _outputMode = source._outputMode
+      _timeoutConf = source._timeoutConf
+      _stateSchema = source._stateSchema
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._groupingExpressions) }()
+        case 3: try { try decoder.decodeSingularMessageField(value: &_storage._func) }()
+        case 4: try { try decoder.decodeRepeatedMessageField(value: &_storage._sortingExpressions) }()
+        case 5: try { try decoder.decodeSingularMessageField(value: &_storage._initialInput) }()
+        case 6: try { try decoder.decodeRepeatedMessageField(value: &_storage._initialGroupingExpressions) }()
+        case 7: try { try decoder.decodeSingularBoolField(value: &_storage._isMapGroupsWithState) }()
+        case 8: try { try decoder.decodeSingularStringField(value: &_storage._outputMode) }()
+        case 9: try { try decoder.decodeSingularStringField(value: &_storage._timeoutConf) }()
+        case 10: try { try decoder.decodeSingularMessageField(value: &_storage._stateSchema) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._groupingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._groupingExpressions, fieldNumber: 2)
+      }
+      try { if let v = _storage._func {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+      } }()
+      if !_storage._sortingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._sortingExpressions, fieldNumber: 4)
+      }
+      try { if let v = _storage._initialInput {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 5)
+      } }()
+      if !_storage._initialGroupingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._initialGroupingExpressions, fieldNumber: 6)
+      }
+      try { if let v = _storage._isMapGroupsWithState {
+        try visitor.visitSingularBoolField(value: v, fieldNumber: 7)
+      } }()
+      try { if let v = _storage._outputMode {
+        try visitor.visitSingularStringField(value: v, fieldNumber: 8)
+      } }()
+      try { if let v = _storage._timeoutConf {
+        try visitor.visitSingularStringField(value: v, fieldNumber: 9)
+      } }()
+      try { if let v = _storage._stateSchema {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 10)
+      } }()
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_GroupMap, rhs: Spark_Connect_GroupMap) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._groupingExpressions != rhs_storage._groupingExpressions {return false}
+        if _storage._func != rhs_storage._func {return false}
+        if _storage._sortingExpressions != rhs_storage._sortingExpressions {return false}
+        if _storage._initialInput != rhs_storage._initialInput {return false}
+        if _storage._initialGroupingExpressions != rhs_storage._initialGroupingExpressions {return false}
+        if _storage._isMapGroupsWithState != rhs_storage._isMapGroupsWithState {return false}
+        if _storage._outputMode != rhs_storage._outputMode {return false}
+        if _storage._timeoutConf != rhs_storage._timeoutConf {return false}
+        if _storage._stateSchema != rhs_storage._stateSchema {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_CoGroupMap: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".CoGroupMap"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .standard(proto: "input_grouping_expressions"),
+    3: .same(proto: "other"),
+    4: .standard(proto: "other_grouping_expressions"),
+    5: .same(proto: "func"),
+    6: .standard(proto: "input_sorting_expressions"),
+    7: .standard(proto: "other_sorting_expressions"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _inputGroupingExpressions: [Spark_Connect_Expression] = []
+    var _other: Spark_Connect_Relation? = nil
+    var _otherGroupingExpressions: [Spark_Connect_Expression] = []
+    var _func: Spark_Connect_CommonInlineUserDefinedFunction? = nil
+    var _inputSortingExpressions: [Spark_Connect_Expression] = []
+    var _otherSortingExpressions: [Spark_Connect_Expression] = []
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _inputGroupingExpressions = source._inputGroupingExpressions
+      _other = source._other
+      _otherGroupingExpressions = source._otherGroupingExpressions
+      _func = source._func
+      _inputSortingExpressions = source._inputSortingExpressions
+      _otherSortingExpressions = source._otherSortingExpressions
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._inputGroupingExpressions) }()
+        case 3: try { try decoder.decodeSingularMessageField(value: &_storage._other) }()
+        case 4: try { try decoder.decodeRepeatedMessageField(value: &_storage._otherGroupingExpressions) }()
+        case 5: try { try decoder.decodeSingularMessageField(value: &_storage._func) }()
+        case 6: try { try decoder.decodeRepeatedMessageField(value: &_storage._inputSortingExpressions) }()
+        case 7: try { try decoder.decodeRepeatedMessageField(value: &_storage._otherSortingExpressions) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._inputGroupingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._inputGroupingExpressions, fieldNumber: 2)
+      }
+      try { if let v = _storage._other {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+      } }()
+      if !_storage._otherGroupingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._otherGroupingExpressions, fieldNumber: 4)
+      }
+      try { if let v = _storage._func {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 5)
+      } }()
+      if !_storage._inputSortingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._inputSortingExpressions, fieldNumber: 6)
+      }
+      if !_storage._otherSortingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._otherSortingExpressions, fieldNumber: 7)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_CoGroupMap, rhs: Spark_Connect_CoGroupMap) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._inputGroupingExpressions != rhs_storage._inputGroupingExpressions {return false}
+        if _storage._other != rhs_storage._other {return false}
+        if _storage._otherGroupingExpressions != rhs_storage._otherGroupingExpressions {return false}
+        if _storage._func != rhs_storage._func {return false}
+        if _storage._inputSortingExpressions != rhs_storage._inputSortingExpressions {return false}
+        if _storage._otherSortingExpressions != rhs_storage._otherSortingExpressions {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_ApplyInPandasWithState: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".ApplyInPandasWithState"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .standard(proto: "grouping_expressions"),
+    3: .same(proto: "func"),
+    4: .standard(proto: "output_schema"),
+    5: .standard(proto: "state_schema"),
+    6: .standard(proto: "output_mode"),
+    7: .standard(proto: "timeout_conf"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _groupingExpressions: [Spark_Connect_Expression] = []
+    var _func: Spark_Connect_CommonInlineUserDefinedFunction? = nil
+    var _outputSchema: String = String()
+    var _stateSchema: String = String()
+    var _outputMode: String = String()
+    var _timeoutConf: String = String()
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
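// decodeMessage(decoder:) and traverse(visitor:) below are the primitives
// behind SwiftProtobuf's public entry points. A round-trip sketch (method
// names per SwiftProtobuf 1.x; assumed, not part of this diff):
//
//     import SwiftProtobuf
//     var m = Spark_Connect_ApplyInPandasWithState()
//     m.outputMode = "update"
//     let bytes = try m.serializedData()  // driven by traverse(visitor:)
//     let back = try Spark_Connect_ApplyInPandasWithState(serializedData: bytes)
//     assert(back == m)                   // the generated == defined below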
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _groupingExpressions = source._groupingExpressions
+      _func = source._func
+      _outputSchema = source._outputSchema
+      _stateSchema = source._stateSchema
+      _outputMode = source._outputMode
+      _timeoutConf = source._timeoutConf
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeRepeatedMessageField(value: &_storage._groupingExpressions) }()
+        case 3: try { try decoder.decodeSingularMessageField(value: &_storage._func) }()
+        case 4: try { try decoder.decodeSingularStringField(value: &_storage._outputSchema) }()
+        case 5: try { try decoder.decodeSingularStringField(value: &_storage._stateSchema) }()
+        case 6: try { try decoder.decodeSingularStringField(value: &_storage._outputMode) }()
+        case 7: try { try decoder.decodeSingularStringField(value: &_storage._timeoutConf) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._groupingExpressions.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._groupingExpressions, fieldNumber: 2)
+      }
+      try { if let v = _storage._func {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+      } }()
+      if !_storage._outputSchema.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._outputSchema, fieldNumber: 4)
+      }
+      if !_storage._stateSchema.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._stateSchema, fieldNumber: 5)
+      }
+      if !_storage._outputMode.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._outputMode, fieldNumber: 6)
+      }
+      if !_storage._timeoutConf.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._timeoutConf, fieldNumber: 7)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_ApplyInPandasWithState, rhs: Spark_Connect_ApplyInPandasWithState) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._groupingExpressions != rhs_storage._groupingExpressions {return false}
+        if _storage._func != rhs_storage._func {return false}
+        if _storage._outputSchema != rhs_storage._outputSchema {return false}
+        if _storage._stateSchema != rhs_storage._stateSchema {return false}
+        if _storage._outputMode != rhs_storage._outputMode {return false}
+        if _storage._timeoutConf != rhs_storage._timeoutConf {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_CommonInlineUserDefinedTableFunction: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".CommonInlineUserDefinedTableFunction"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "function_name"),
+    2: .same(proto: "deterministic"),
+    3: .same(proto: "arguments"),
+    4: .standard(proto: "python_udtf"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.functionName) }()
+      case 2: try { try decoder.decodeSingularBoolField(value: &self.deterministic) }()
+      case 3: try { try decoder.decodeRepeatedMessageField(value: &self.arguments) }()
+      case 4: try {
+        var v: Spark_Connect_PythonUDTF?
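// (annotating the oneof decode that follows: remember any value already
// stored in `self.function`, merge-decode into it, then let the decoder
// report a conflict if a different case of this oneof was already set)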
+        var hadOneofValue = false
+        if let current = self.function {
+          hadOneofValue = true
+          if case .pythonUdtf(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.function = .pythonUdtf(v)
+        }
+      }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if !self.functionName.isEmpty {
+      try visitor.visitSingularStringField(value: self.functionName, fieldNumber: 1)
+    }
+    if self.deterministic != false {
+      try visitor.visitSingularBoolField(value: self.deterministic, fieldNumber: 2)
+    }
+    if !self.arguments.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.arguments, fieldNumber: 3)
+    }
+    try { if case .pythonUdtf(let v)? = self.function {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 4)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_CommonInlineUserDefinedTableFunction, rhs: Spark_Connect_CommonInlineUserDefinedTableFunction) -> Bool {
+    if lhs.functionName != rhs.functionName {return false}
+    if lhs.deterministic != rhs.deterministic {return false}
+    if lhs.arguments != rhs.arguments {return false}
+    if lhs.function != rhs.function {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_PythonUDTF: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".PythonUDTF"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "return_type"),
+    2: .standard(proto: "eval_type"),
+    3: .same(proto: "command"),
+    4: .standard(proto: "python_ver"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularMessageField(value: &self._returnType) }()
+      case 2: try { try decoder.decodeSingularInt32Field(value: &self.evalType) }()
+      case 3: try { try decoder.decodeSingularBytesField(value: &self.command) }()
+      case 4: try { try decoder.decodeSingularStringField(value: &self.pythonVer) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    try { if let v = self._returnType {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+    } }()
+    if self.evalType != 0 {
+      try visitor.visitSingularInt32Field(value: self.evalType, fieldNumber: 2)
+    }
+    if !self.command.isEmpty {
+      try visitor.visitSingularBytesField(value: self.command, fieldNumber: 3)
+    }
+    if !self.pythonVer.isEmpty {
+      try visitor.visitSingularStringField(value: self.pythonVer, fieldNumber: 4)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_PythonUDTF, rhs: Spark_Connect_PythonUDTF) -> Bool {
+    if lhs._returnType != rhs._returnType {return false}
+    if lhs.evalType != rhs.evalType {return false}
+    if lhs.command != rhs.command {return false}
+    if lhs.pythonVer != rhs.pythonVer {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_CommonInlineUserDefinedDataSource: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".CommonInlineUserDefinedDataSource"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "name"),
+    2: .standard(proto: "python_data_source"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.name) }()
+      case 2: try {
+        var v: Spark_Connect_PythonDataSource?
+        var hadOneofValue = false
+        if let current = self.dataSource {
+          hadOneofValue = true
+          if case .pythonDataSource(let m) = current {v = m}
+        }
+        try decoder.decodeSingularMessageField(value: &v)
+        if let v = v {
+          if hadOneofValue {try decoder.handleConflictingOneOf()}
+          self.dataSource = .pythonDataSource(v)
+        }
+      }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    // The use of inline closures is to circumvent an issue where the compiler
+    // allocates stack space for every if/case branch local when no optimizations
+    // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if !self.name.isEmpty {
+      try visitor.visitSingularStringField(value: self.name, fieldNumber: 1)
+    }
+    try { if case .pythonDataSource(let v)? = self.dataSource {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_CommonInlineUserDefinedDataSource, rhs: Spark_Connect_CommonInlineUserDefinedDataSource) -> Bool {
+    if lhs.name != rhs.name {return false}
+    if lhs.dataSource != rhs.dataSource {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_PythonDataSource: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".PythonDataSource"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "command"),
+    2: .standard(proto: "python_ver"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularBytesField(value: &self.command) }()
+      case 2: try { try decoder.decodeSingularStringField(value: &self.pythonVer) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.command.isEmpty {
+      try visitor.visitSingularBytesField(value: self.command, fieldNumber: 1)
+    }
+    if !self.pythonVer.isEmpty {
+      try visitor.visitSingularStringField(value: self.pythonVer, fieldNumber: 2)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_PythonDataSource, rhs: Spark_Connect_PythonDataSource) -> Bool {
+    if lhs.command != rhs.command {return false}
+    if lhs.pythonVer != rhs.pythonVer {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_CollectMetrics: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".CollectMetrics"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "name"),
+    3: .same(proto: "metrics"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _name: String = String()
+    var _metrics: [Spark_Connect_Expression] = []
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
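// Note the equality fast path used throughout this file: == first compares
// storage references with !==, and only distinct references fall back to the
// field-by-field closure. A sketch (assumed accessors, not part of the diff):
//
//     var x = Spark_Connect_CollectMetrics()
//     x.name = "m"
//     let y = x          // same storage reference: the !== check short-circuits
//     var z = y
//     z.name = "m"       // mutation forces a storage copy; x == z now takes
//     assert(x == z)     // the field-by-field path and still returns true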
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _name = source._name
+      _metrics = source._metrics
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeSingularStringField(value: &_storage._name) }()
+        case 3: try { try decoder.decodeRepeatedMessageField(value: &_storage._metrics) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if !_storage._name.isEmpty {
+        try visitor.visitSingularStringField(value: _storage._name, fieldNumber: 2)
+      }
+      if !_storage._metrics.isEmpty {
+        try visitor.visitRepeatedMessageField(value: _storage._metrics, fieldNumber: 3)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_CollectMetrics, rhs: Spark_Connect_CollectMetrics) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._name != rhs_storage._name {return false}
+        if _storage._metrics != rhs_storage._metrics {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_Parse: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".Parse"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "input"),
+    2: .same(proto: "format"),
+    3: .same(proto: "schema"),
+    4: .same(proto: "options"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _input: Spark_Connect_Relation? = nil
+    var _format: Spark_Connect_Parse.ParseFormat = .unspecified
+    var _schema: Spark_Connect_DataType? = nil
+    var _options: Dictionary<String,String> = [:]
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _input = source._input
+      _format = source._format
+      _schema = source._schema
+      _options = source._options
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._input) }()
+        case 2: try { try decoder.decodeSingularEnumField(value: &_storage._format) }()
+        case 3: try { try decoder.decodeSingularMessageField(value: &_storage._schema) }()
+        case 4: try { try decoder.decodeMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: &_storage._options) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._input {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if _storage._format != .unspecified {
+        try visitor.visitSingularEnumField(value: _storage._format, fieldNumber: 2)
+      }
+      try { if let v = _storage._schema {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 3)
+      } }()
+      if !_storage._options.isEmpty {
+        try visitor.visitMapField(fieldType: SwiftProtobuf._ProtobufMap<SwiftProtobuf.ProtobufString,SwiftProtobuf.ProtobufString>.self, value: _storage._options, fieldNumber: 4)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_Parse, rhs: Spark_Connect_Parse) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._input != rhs_storage._input {return false}
+        if _storage._format != rhs_storage._format {return false}
+        if _storage._schema != rhs_storage._schema {return false}
+        if _storage._options != rhs_storage._options {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_Parse.ParseFormat: SwiftProtobuf._ProtoNameProviding {
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    0: .same(proto: "PARSE_FORMAT_UNSPECIFIED"),
+    1: .same(proto: "PARSE_FORMAT_CSV"),
+    2: .same(proto: "PARSE_FORMAT_JSON"),
+  ]
+}
+
+extension Spark_Connect_AsOfJoin: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = _protobuf_package + ".AsOfJoin"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "left"),
+    2: .same(proto: "right"),
+    3: .standard(proto: "left_as_of"),
+    4: .standard(proto: "right_as_of"),
+    5: .standard(proto: "join_expr"),
+    6: .standard(proto: "using_columns"),
+    7: .standard(proto: "join_type"),
+    8: .same(proto: "tolerance"),
+    9: .standard(proto: "allow_exact_matches"),
+    10: .same(proto: "direction"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _left: Spark_Connect_Relation? = nil
+    var _right: Spark_Connect_Relation? = nil
+    var _leftAsOf: Spark_Connect_Expression? = nil
+    var _rightAsOf: Spark_Connect_Expression? = nil
+    var _joinExpr: Spark_Connect_Expression? = nil
+    var _usingColumns: [String] = []
+    var _joinType: String = String()
+    var _tolerance: Spark_Connect_Expression? = nil
+    var _allowExactMatches: Bool = false
+    var _direction: String = String()
+
+    #if swift(>=5.10)
+      // This property is used as the initial default value for new instances of the type.
+      // The type itself is protecting the reference to its storage via CoW semantics.
+      // This will force a copy to be made of this reference when the first mutation occurs;
+      // hence, it is safe to mark this as `nonisolated(unsafe)`.
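// Usage sketch for the Parse message and ParseFormat enum just above (the
// .csv case name assumes SwiftProtobuf's usual PARSE_FORMAT_ prefix
// stripping; accessors are assumed, not shown in this diff):
//
//     var p = Spark_Connect_Parse()
//     p.format = .csv                // only serialized while != .unspecified
//     p.options["header"] = "true"   // the map<string, string> field 4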
+      static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+      static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _left = source._left
+      _right = source._right
+      _leftAsOf = source._leftAsOf
+      _rightAsOf = source._rightAsOf
+      _joinExpr = source._joinExpr
+      _usingColumns = source._usingColumns
+      _joinType = source._joinType
+      _tolerance = source._tolerance
+      _allowExactMatches = source._allowExactMatches
+      _direction = source._direction
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._left) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._right) }()
+        case 3: try { try decoder.decodeSingularMessageField(value: &_storage._leftAsOf) }()
+        case 4: try { try decoder.decodeSingularMessageField(value: &_storage._rightAsOf) }()
+        case 5: try { try decoder.decodeSingularMessageField(value: &_storage._joinExpr) }()
+        case 6: try { try decoder.decodeRepeatedStringField(value: &_storage._usingColumns) }()
+        case 7: try { try decoder.decodeSingularStringField(value: &_storage._joinType) }()
+        case 8: try { try decoder.decodeSingularMessageField(value: &_storage._tolerance) }()
+        case 9: try { try decoder.decodeSingularBoolField(value: &_storage._allowExactMatches) }()
+        case 10: try { try decoder.decodeSingularStringField(value: &_storage._direction) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._left { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._right { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = _storage._leftAsOf { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + try { if let v = _storage._rightAsOf { + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + } }() + try { if let v = _storage._joinExpr { + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + } }() + if !_storage._usingColumns.isEmpty { + try visitor.visitRepeatedStringField(value: _storage._usingColumns, fieldNumber: 6) + } + if !_storage._joinType.isEmpty { + try visitor.visitSingularStringField(value: _storage._joinType, fieldNumber: 7) + } + try { if let v = _storage._tolerance { + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + } }() + if _storage._allowExactMatches != false { + try visitor.visitSingularBoolField(value: _storage._allowExactMatches, fieldNumber: 9) + } + if !_storage._direction.isEmpty { + try visitor.visitSingularStringField(value: _storage._direction, fieldNumber: 10) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_AsOfJoin, rhs: Spark_Connect_AsOfJoin) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._left != rhs_storage._left {return false} + if _storage._right != rhs_storage._right {return false} + if _storage._leftAsOf != rhs_storage._leftAsOf {return false} + if _storage._rightAsOf != rhs_storage._rightAsOf {return false} + if _storage._joinExpr != rhs_storage._joinExpr {return false} + if _storage._usingColumns != rhs_storage._usingColumns {return false} + if _storage._joinType != rhs_storage._joinType {return false} + if _storage._tolerance != rhs_storage._tolerance {return false} + if _storage._allowExactMatches != rhs_storage._allowExactMatches {return false} + if _storage._direction != rhs_storage._direction {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_LateralJoin: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".LateralJoin" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "left"), + 2: .same(proto: "right"), + 3: .standard(proto: "join_condition"), + 4: .standard(proto: "join_type"), + ] + + fileprivate class _StorageClass { + var _left: Spark_Connect_Relation? = nil + var _right: Spark_Connect_Relation? = nil + var _joinCondition: Spark_Connect_Expression? = nil + var _joinType: Spark_Connect_Join.JoinType = .unspecified + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. 
+ static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _left = source._left + _right = source._right + _joinCondition = source._joinCondition + _joinType = source._joinType + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularMessageField(value: &_storage._left) }() + case 2: try { try decoder.decodeSingularMessageField(value: &_storage._right) }() + case 3: try { try decoder.decodeSingularMessageField(value: &_storage._joinCondition) }() + case 4: try { try decoder.decodeSingularEnumField(value: &_storage._joinType) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._left { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._right { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + try { if let v = _storage._joinCondition { + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + } }() + if _storage._joinType != .unspecified { + try visitor.visitSingularEnumField(value: _storage._joinType, fieldNumber: 4) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_LateralJoin, rhs: Spark_Connect_LateralJoin) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._left != rhs_storage._left {return false} + if _storage._right != rhs_storage._right {return false} + if _storage._joinCondition != rhs_storage._joinCondition {return false} + if _storage._joinType != rhs_storage._joinType {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} diff --git a/Sources/SparkConnect/types.grpc.swift b/Sources/SparkConnect/types.grpc.swift new file mode 100644 index 0000000..50ce28d --- /dev/null +++ b/Sources/SparkConnect/types.grpc.swift @@ -0,0 +1,26 @@ +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. 
You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// DO NOT EDIT. +// swift-format-ignore-file +// +// Generated by the gRPC Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/types.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/grpc/grpc-swift + +// This file contained no services. \ No newline at end of file diff --git a/Sources/SparkConnect/types.pb.swift b/Sources/SparkConnect/types.pb.swift new file mode 100644 index 0000000..3b0acd1 --- /dev/null +++ b/Sources/SparkConnect/types.pb.swift @@ -0,0 +1,2457 @@ +// DO NOT EDIT. +// swift-format-ignore-file +// swiftlint:disable all +// +// Generated by the Swift generator plugin for the protocol buffer compiler. +// Source: spark/connect/types.proto +// +// For information on using the generated types, please see the documentation: +// https://github.com/apple/swift-protobuf/ + +// +// Licensed to the Apache Software Foundation (ASF) under one or more +// contributor license agreements. See the NOTICE file distributed with +// this work for additional information regarding copyright ownership. +// The ASF licenses this file to You under the Apache License, Version 2.0 +// (the "License"); you may not use this file except in compliance with +// the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +import SwiftProtobuf + +// If the compiler emits an error on this type, it is because this file +// was generated by a version of the `protoc` Swift plug-in that is +// incompatible with the version of SwiftProtobuf to which you are linking. +// Please ensure that you are building against the same version of the API +// that was used to generate this file. +fileprivate struct _GeneratedWithProtocGenSwiftVersion: SwiftProtobuf.ProtobufAPIVersionCheck { + struct _2: SwiftProtobuf.ProtobufAPIVersion_2 {} + typealias Version = _2 +} + +/// This message describes the logical [[DataType]] of something. It does not carry the value +/// itself but only describes it. +struct Spark_Connect_DataType: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var kind: OneOf_Kind? { + get {return _storage._kind} + set {_uniqueStorage()._kind = newValue} + } + + var null: Spark_Connect_DataType.NULL { + get { + if case .null(let v)? = _storage._kind {return v} + return Spark_Connect_DataType.NULL() + } + set {_uniqueStorage()._kind = .null(newValue)} + } + + var binary: Spark_Connect_DataType.Binary { + get { + if case .binary(let v)? 
= _storage._kind {return v}
+      return Spark_Connect_DataType.Binary()
+    }
+    set {_uniqueStorage()._kind = .binary(newValue)}
+  }
+
+  var boolean: Spark_Connect_DataType.Boolean {
+    get {
+      if case .boolean(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Boolean()
+    }
+    set {_uniqueStorage()._kind = .boolean(newValue)}
+  }
+
+  /// Numeric types
+  var byte: Spark_Connect_DataType.Byte {
+    get {
+      if case .byte(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Byte()
+    }
+    set {_uniqueStorage()._kind = .byte(newValue)}
+  }
+
+  var short: Spark_Connect_DataType.Short {
+    get {
+      if case .short(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Short()
+    }
+    set {_uniqueStorage()._kind = .short(newValue)}
+  }
+
+  var integer: Spark_Connect_DataType.Integer {
+    get {
+      if case .integer(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Integer()
+    }
+    set {_uniqueStorage()._kind = .integer(newValue)}
+  }
+
+  var long: Spark_Connect_DataType.Long {
+    get {
+      if case .long(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Long()
+    }
+    set {_uniqueStorage()._kind = .long(newValue)}
+  }
+
+  var float: Spark_Connect_DataType.FloatMessage {
+    get {
+      if case .float(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.FloatMessage()
+    }
+    set {_uniqueStorage()._kind = .float(newValue)}
+  }
+
+  var double: Spark_Connect_DataType.DoubleMessage {
+    get {
+      if case .double(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.DoubleMessage()
+    }
+    set {_uniqueStorage()._kind = .double(newValue)}
+  }
+
+  var decimal: Spark_Connect_DataType.Decimal {
+    get {
+      if case .decimal(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Decimal()
+    }
+    set {_uniqueStorage()._kind = .decimal(newValue)}
+  }
+
+  /// String types
+  var string: Spark_Connect_DataType.StringMessage {
+    get {
+      if case .string(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.StringMessage()
+    }
+    set {_uniqueStorage()._kind = .string(newValue)}
+  }
+
+  var char: Spark_Connect_DataType.Char {
+    get {
+      if case .char(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Char()
+    }
+    set {_uniqueStorage()._kind = .char(newValue)}
+  }
+
+  var varChar: Spark_Connect_DataType.VarChar {
+    get {
+      if case .varChar(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.VarChar()
+    }
+    set {_uniqueStorage()._kind = .varChar(newValue)}
+  }
+
+  /// Datetime types
+  var date: Spark_Connect_DataType.Date {
+    get {
+      if case .date(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Date()
+    }
+    set {_uniqueStorage()._kind = .date(newValue)}
+  }
+
+  var timestamp: Spark_Connect_DataType.Timestamp {
+    get {
+      if case .timestamp(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Timestamp()
+    }
+    set {_uniqueStorage()._kind = .timestamp(newValue)}
+  }
+
+  var timestampNtz: Spark_Connect_DataType.TimestampNTZ {
+    get {
+      if case .timestampNtz(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.TimestampNTZ()
+    }
+    set {_uniqueStorage()._kind = .timestampNtz(newValue)}
+  }
+
+  /// Interval types
+  var calendarInterval: Spark_Connect_DataType.CalendarInterval {
+    get {
+      if case .calendarInterval(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.CalendarInterval()
+    }
+    set {_uniqueStorage()._kind = .calendarInterval(newValue)}
+  }
+
+  var yearMonthInterval: Spark_Connect_DataType.YearMonthInterval {
+    get {
+      if case .yearMonthInterval(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.YearMonthInterval()
+    }
+    set {_uniqueStorage()._kind = .yearMonthInterval(newValue)}
+  }
+
+  var dayTimeInterval: Spark_Connect_DataType.DayTimeInterval {
+    get {
+      if case .dayTimeInterval(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.DayTimeInterval()
+    }
+    set {_uniqueStorage()._kind = .dayTimeInterval(newValue)}
+  }
+
+  /// Complex types
+  var array: Spark_Connect_DataType.Array {
+    get {
+      if case .array(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Array()
+    }
+    set {_uniqueStorage()._kind = .array(newValue)}
+  }
+
+  var `struct`: Spark_Connect_DataType.Struct {
+    get {
+      if case .struct(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Struct()
+    }
+    set {_uniqueStorage()._kind = .struct(newValue)}
+  }
+
+  var map: Spark_Connect_DataType.Map {
+    get {
+      if case .map(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Map()
+    }
+    set {_uniqueStorage()._kind = .map(newValue)}
+  }
+
+  var variant: Spark_Connect_DataType.Variant {
+    get {
+      if case .variant(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Variant()
+    }
+    set {_uniqueStorage()._kind = .variant(newValue)}
+  }
+
+  /// UserDefinedType
+  var udt: Spark_Connect_DataType.UDT {
+    get {
+      if case .udt(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.UDT()
+    }
+    set {_uniqueStorage()._kind = .udt(newValue)}
+  }
+
+  /// UnparsedDataType
+  var unparsed: Spark_Connect_DataType.Unparsed {
+    get {
+      if case .unparsed(let v)? = _storage._kind {return v}
+      return Spark_Connect_DataType.Unparsed()
+    }
+    set {_uniqueStorage()._kind = .unparsed(newValue)}
+  }
+
+  var unknownFields = SwiftProtobuf.UnknownStorage()
+
+  enum OneOf_Kind: Equatable, Sendable {
+    case null(Spark_Connect_DataType.NULL)
+    case binary(Spark_Connect_DataType.Binary)
+    case boolean(Spark_Connect_DataType.Boolean)
+    /// Numeric types
+    case byte(Spark_Connect_DataType.Byte)
+    case short(Spark_Connect_DataType.Short)
+    case integer(Spark_Connect_DataType.Integer)
+    case long(Spark_Connect_DataType.Long)
+    case float(Spark_Connect_DataType.FloatMessage)
+    case double(Spark_Connect_DataType.DoubleMessage)
+    case decimal(Spark_Connect_DataType.Decimal)
+    /// String types
+    case string(Spark_Connect_DataType.StringMessage)
+    case char(Spark_Connect_DataType.Char)
+    case varChar(Spark_Connect_DataType.VarChar)
+    /// Datetime types
+    case date(Spark_Connect_DataType.Date)
+    case timestamp(Spark_Connect_DataType.Timestamp)
+    case timestampNtz(Spark_Connect_DataType.TimestampNTZ)
+    /// Interval types
+    case calendarInterval(Spark_Connect_DataType.CalendarInterval)
+    case yearMonthInterval(Spark_Connect_DataType.YearMonthInterval)
+    case dayTimeInterval(Spark_Connect_DataType.DayTimeInterval)
+    /// Complex types
+    case array(Spark_Connect_DataType.Array)
+    case `struct`(Spark_Connect_DataType.Struct)
+    case map(Spark_Connect_DataType.Map)
+    case variant(Spark_Connect_DataType.Variant)
+    /// UserDefinedType
+    case udt(Spark_Connect_DataType.UDT)
+    /// UnparsedDataType
+    case unparsed(Spark_Connect_DataType.Unparsed)
+
+  }
+
+  struct Boolean: Sendable {
+    // SwiftProtobuf.Message conformance is added in an extension below.
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Byte: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Short: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Integer: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Long: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct FloatMessage: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct DoubleMessage: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct StringMessage: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var collation: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Binary: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct NULL: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Timestamp: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Date: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct TimestampNTZ: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct CalendarInterval: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct YearMonthInterval: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var startField: Int32 { + get {return _startField ?? 0} + set {_startField = newValue} + } + /// Returns true if `startField` has been explicitly set. + var hasStartField: Bool {return self._startField != nil} + /// Clears the value of `startField`. Subsequent reads from it will return its default value. + mutating func clearStartField() {self._startField = nil} + + var endField: Int32 { + get {return _endField ?? 0} + set {_endField = newValue} + } + /// Returns true if `endField` has been explicitly set. + var hasEndField: Bool {return self._endField != nil} + /// Clears the value of `endField`. Subsequent reads from it will return its default value. + mutating func clearEndField() {self._endField = nil} + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _startField: Int32? = nil + fileprivate var _endField: Int32? = nil + } + + struct DayTimeInterval: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var startField: Int32 { + get {return _startField ?? 0} + set {_startField = newValue} + } + /// Returns true if `startField` has been explicitly set. + var hasStartField: Bool {return self._startField != nil} + /// Clears the value of `startField`. Subsequent reads from it will return its default value. + mutating func clearStartField() {self._startField = nil} + + var endField: Int32 { + get {return _endField ?? 0} + set {_endField = newValue} + } + /// Returns true if `endField` has been explicitly set. + var hasEndField: Bool {return self._endField != nil} + /// Clears the value of `endField`. Subsequent reads from it will return its default value. 
+ mutating func clearEndField() {self._endField = nil} + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _startField: Int32? = nil + fileprivate var _endField: Int32? = nil + } + + /// Start compound types. + struct Char: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var length: Int32 = 0 + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct VarChar: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var length: Int32 = 0 + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Decimal: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var scale: Int32 { + get {return _scale ?? 0} + set {_scale = newValue} + } + /// Returns true if `scale` has been explicitly set. + var hasScale: Bool {return self._scale != nil} + /// Clears the value of `scale`. Subsequent reads from it will return its default value. + mutating func clearScale() {self._scale = nil} + + var precision: Int32 { + get {return _precision ?? 0} + set {_precision = newValue} + } + /// Returns true if `precision` has been explicitly set. + var hasPrecision: Bool {return self._precision != nil} + /// Clears the value of `precision`. Subsequent reads from it will return its default value. + mutating func clearPrecision() {self._precision = nil} + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _scale: Int32? = nil + fileprivate var _precision: Int32? = nil + } + + struct StructField: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var name: String = String() + + var dataType: Spark_Connect_DataType { + get {return _dataType ?? Spark_Connect_DataType()} + set {_dataType = newValue} + } + /// Returns true if `dataType` has been explicitly set. + var hasDataType: Bool {return self._dataType != nil} + /// Clears the value of `dataType`. Subsequent reads from it will return its default value. + mutating func clearDataType() {self._dataType = nil} + + var nullable: Bool = false + + var metadata: String { + get {return _metadata ?? String()} + set {_metadata = newValue} + } + /// Returns true if `metadata` has been explicitly set. + var hasMetadata: Bool {return self._metadata != nil} + /// Clears the value of `metadata`. Subsequent reads from it will return its default value. + mutating func clearMetadata() {self._metadata = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _dataType: Spark_Connect_DataType? = nil + fileprivate var _metadata: String? = nil + } + + struct Struct: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. 
See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var fields: [Spark_Connect_DataType.StructField] = [] + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct Array: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var elementType: Spark_Connect_DataType { + get {return _storage._elementType ?? Spark_Connect_DataType()} + set {_uniqueStorage()._elementType = newValue} + } + /// Returns true if `elementType` has been explicitly set. + var hasElementType: Bool {return _storage._elementType != nil} + /// Clears the value of `elementType`. Subsequent reads from it will return its default value. + mutating func clearElementType() {_uniqueStorage()._elementType = nil} + + var containsNull: Bool { + get {return _storage._containsNull} + set {_uniqueStorage()._containsNull = newValue} + } + + var typeVariationReference: UInt32 { + get {return _storage._typeVariationReference} + set {_uniqueStorage()._typeVariationReference = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + struct Map: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var keyType: Spark_Connect_DataType { + get {return _storage._keyType ?? Spark_Connect_DataType()} + set {_uniqueStorage()._keyType = newValue} + } + /// Returns true if `keyType` has been explicitly set. + var hasKeyType: Bool {return _storage._keyType != nil} + /// Clears the value of `keyType`. Subsequent reads from it will return its default value. + mutating func clearKeyType() {_uniqueStorage()._keyType = nil} + + var valueType: Spark_Connect_DataType { + get {return _storage._valueType ?? Spark_Connect_DataType()} + set {_uniqueStorage()._valueType = newValue} + } + /// Returns true if `valueType` has been explicitly set. + var hasValueType: Bool {return _storage._valueType != nil} + /// Clears the value of `valueType`. Subsequent reads from it will return its default value. + mutating func clearValueType() {_uniqueStorage()._valueType = nil} + + var valueContainsNull: Bool { + get {return _storage._valueContainsNull} + set {_uniqueStorage()._valueContainsNull = newValue} + } + + var typeVariationReference: UInt32 { + get {return _storage._typeVariationReference} + set {_uniqueStorage()._typeVariationReference = newValue} + } + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + struct Variant: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + var typeVariationReference: UInt32 = 0 + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + struct UDT: @unchecked Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. 
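+
+    // Editorial note (not generator output): per the field comments below,
+    // `jvmClass` is expected for Scala/Java UDTs, while `pythonClass`,
+    // `serializedPythonClass`, and `sqlType` are expected for Python UDTs;
+    // a well-formed `UDT` message therefore appears to populate only the
+    // references that match its implementation language.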
+ + var type: String { + get {return _storage._type} + set {_uniqueStorage()._type = newValue} + } + + /// Required for Scala/Java UDT + var jvmClass: String { + get {return _storage._jvmClass ?? String()} + set {_uniqueStorage()._jvmClass = newValue} + } + /// Returns true if `jvmClass` has been explicitly set. + var hasJvmClass: Bool {return _storage._jvmClass != nil} + /// Clears the value of `jvmClass`. Subsequent reads from it will return its default value. + mutating func clearJvmClass() {_uniqueStorage()._jvmClass = nil} + + /// Required for Python UDT + var pythonClass: String { + get {return _storage._pythonClass ?? String()} + set {_uniqueStorage()._pythonClass = newValue} + } + /// Returns true if `pythonClass` has been explicitly set. + var hasPythonClass: Bool {return _storage._pythonClass != nil} + /// Clears the value of `pythonClass`. Subsequent reads from it will return its default value. + mutating func clearPythonClass() {_uniqueStorage()._pythonClass = nil} + + /// Required for Python UDT + var serializedPythonClass: String { + get {return _storage._serializedPythonClass ?? String()} + set {_uniqueStorage()._serializedPythonClass = newValue} + } + /// Returns true if `serializedPythonClass` has been explicitly set. + var hasSerializedPythonClass: Bool {return _storage._serializedPythonClass != nil} + /// Clears the value of `serializedPythonClass`. Subsequent reads from it will return its default value. + mutating func clearSerializedPythonClass() {_uniqueStorage()._serializedPythonClass = nil} + + /// Required for Python UDT + var sqlType: Spark_Connect_DataType { + get {return _storage._sqlType ?? Spark_Connect_DataType()} + set {_uniqueStorage()._sqlType = newValue} + } + /// Returns true if `sqlType` has been explicitly set. + var hasSqlType: Bool {return _storage._sqlType != nil} + /// Clears the value of `sqlType`. Subsequent reads from it will return its default value. + mutating func clearSqlType() {_uniqueStorage()._sqlType = nil} + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance + } + + struct Unparsed: Sendable { + // SwiftProtobuf.Message conformance is added in an extension below. See the + // `Message` and `Message+*Additions` files in the SwiftProtobuf library for + // methods supported on all messages. + + /// (Required) The unparsed data type string + var dataTypeString: String = String() + + var unknownFields = SwiftProtobuf.UnknownStorage() + + init() {} + } + + init() {} + + fileprivate var _storage = _StorageClass.defaultInstance +} + +// MARK: - Code below here is support for the SwiftProtobuf runtime. 
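+
+// Editorial note (not generator output): a minimal usage sketch of the
+// generated `Spark_Connect_DataType` API above. `kind` is a proto `oneof`,
+// so assigning one case property (e.g. `long`) replaces any previously set
+// case, and every setter funnels through `_uniqueStorage()` so shared
+// backing storage is copied on first mutation (copy-on-write).
+//
+//     var idType = Spark_Connect_DataType()
+//     idType.long = Spark_Connect_DataType.Long()     // kind == .long(...)
+//
+//     var idField = Spark_Connect_DataType.StructField()
+//     idField.name = "id"
+//     idField.dataType = idType
+//     idField.nullable = false
+//
+//     var schema = Spark_Connect_DataType()
+//     var structType = Spark_Connect_DataType.Struct()
+//     structType.fields = [idField]
+//     schema.`struct` = structType                    // kind == .struct(...)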
+ +fileprivate let _protobuf_package = "spark.connect" + +extension Spark_Connect_DataType: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = _protobuf_package + ".DataType" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "null"), + 2: .same(proto: "binary"), + 3: .same(proto: "boolean"), + 4: .same(proto: "byte"), + 5: .same(proto: "short"), + 6: .same(proto: "integer"), + 7: .same(proto: "long"), + 8: .same(proto: "float"), + 9: .same(proto: "double"), + 10: .same(proto: "decimal"), + 11: .same(proto: "string"), + 12: .same(proto: "char"), + 13: .standard(proto: "var_char"), + 14: .same(proto: "date"), + 15: .same(proto: "timestamp"), + 16: .standard(proto: "timestamp_ntz"), + 17: .standard(proto: "calendar_interval"), + 18: .standard(proto: "year_month_interval"), + 19: .standard(proto: "day_time_interval"), + 20: .same(proto: "array"), + 21: .same(proto: "struct"), + 22: .same(proto: "map"), + 25: .same(proto: "variant"), + 23: .same(proto: "udt"), + 24: .same(proto: "unparsed"), + ] + + fileprivate class _StorageClass { + var _kind: Spark_Connect_DataType.OneOf_Kind? + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _kind = source._kind + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { + var v: Spark_Connect_DataType.NULL? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .null(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .null(v) + } + }() + case 2: try { + var v: Spark_Connect_DataType.Binary? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .binary(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .binary(v) + } + }() + case 3: try { + var v: Spark_Connect_DataType.Boolean? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .boolean(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .boolean(v) + } + }() + case 4: try { + var v: Spark_Connect_DataType.Byte? 
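+        // Editorial note (not generator output): as in every case of this
+        // switch, `hadOneofValue` records whether some `kind` case was already
+        // populated; if a second oneof member then arrives on the wire,
+        // `handleConflictingOneOf()` lets the active decoder apply its own
+        // policy (SwiftProtobuf's binary decoder keeps the last value, while
+        // its JSON decoder reports a conflict).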
+ var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .byte(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .byte(v) + } + }() + case 5: try { + var v: Spark_Connect_DataType.Short? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .short(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .short(v) + } + }() + case 6: try { + var v: Spark_Connect_DataType.Integer? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .integer(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .integer(v) + } + }() + case 7: try { + var v: Spark_Connect_DataType.Long? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .long(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .long(v) + } + }() + case 8: try { + var v: Spark_Connect_DataType.FloatMessage? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .float(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .float(v) + } + }() + case 9: try { + var v: Spark_Connect_DataType.DoubleMessage? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .double(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .double(v) + } + }() + case 10: try { + var v: Spark_Connect_DataType.Decimal? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .decimal(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .decimal(v) + } + }() + case 11: try { + var v: Spark_Connect_DataType.StringMessage? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .string(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .string(v) + } + }() + case 12: try { + var v: Spark_Connect_DataType.Char? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .char(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .char(v) + } + }() + case 13: try { + var v: Spark_Connect_DataType.VarChar? 
+ var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .varChar(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .varChar(v) + } + }() + case 14: try { + var v: Spark_Connect_DataType.Date? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .date(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .date(v) + } + }() + case 15: try { + var v: Spark_Connect_DataType.Timestamp? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .timestamp(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .timestamp(v) + } + }() + case 16: try { + var v: Spark_Connect_DataType.TimestampNTZ? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .timestampNtz(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .timestampNtz(v) + } + }() + case 17: try { + var v: Spark_Connect_DataType.CalendarInterval? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .calendarInterval(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .calendarInterval(v) + } + }() + case 18: try { + var v: Spark_Connect_DataType.YearMonthInterval? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .yearMonthInterval(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .yearMonthInterval(v) + } + }() + case 19: try { + var v: Spark_Connect_DataType.DayTimeInterval? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .dayTimeInterval(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .dayTimeInterval(v) + } + }() + case 20: try { + var v: Spark_Connect_DataType.Array? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .array(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .array(v) + } + }() + case 21: try { + var v: Spark_Connect_DataType.Struct? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .struct(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .struct(v) + } + }() + case 22: try { + var v: Spark_Connect_DataType.Map? 
+ var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .map(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .map(v) + } + }() + case 23: try { + var v: Spark_Connect_DataType.UDT? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .udt(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .udt(v) + } + }() + case 24: try { + var v: Spark_Connect_DataType.Unparsed? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .unparsed(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .unparsed(v) + } + }() + case 25: try { + var v: Spark_Connect_DataType.Variant? + var hadOneofValue = false + if let current = _storage._kind { + hadOneofValue = true + if case .variant(let m) = current {v = m} + } + try decoder.decodeSingularMessageField(value: &v) + if let v = v { + if hadOneofValue {try decoder.handleConflictingOneOf()} + _storage._kind = .variant(v) + } + }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + switch _storage._kind { + case .null?: try { + guard case .null(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + }() + case .binary?: try { + guard case .binary(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + }() + case .boolean?: try { + guard case .boolean(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 3) + }() + case .byte?: try { + guard case .byte(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 4) + }() + case .short?: try { + guard case .short(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + }() + case .integer?: try { + guard case .integer(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 6) + }() + case .long?: try { + guard case .long(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 7) + }() + case .float?: try { + guard case .float(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 8) + }() + case .double?: try { + guard case .double(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 9) + }() + case .decimal?: try { + guard case .decimal(let v)? 
= _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 10) + }() + case .string?: try { + guard case .string(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 11) + }() + case .char?: try { + guard case .char(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 12) + }() + case .varChar?: try { + guard case .varChar(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 13) + }() + case .date?: try { + guard case .date(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 14) + }() + case .timestamp?: try { + guard case .timestamp(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 15) + }() + case .timestampNtz?: try { + guard case .timestampNtz(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 16) + }() + case .calendarInterval?: try { + guard case .calendarInterval(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 17) + }() + case .yearMonthInterval?: try { + guard case .yearMonthInterval(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 18) + }() + case .dayTimeInterval?: try { + guard case .dayTimeInterval(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 19) + }() + case .array?: try { + guard case .array(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 20) + }() + case .struct?: try { + guard case .struct(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 21) + }() + case .map?: try { + guard case .map(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 22) + }() + case .udt?: try { + guard case .udt(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 23) + }() + case .unparsed?: try { + guard case .unparsed(let v)? = _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 24) + }() + case .variant?: try { + guard case .variant(let v)? 
= _storage._kind else { preconditionFailure() } + try visitor.visitSingularMessageField(value: v, fieldNumber: 25) + }() + case nil: break + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType, rhs: Spark_Connect_DataType) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._kind != rhs_storage._kind {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Boolean: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Boolean" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Boolean, rhs: Spark_Connect_DataType.Boolean) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Byte: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Byte" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Byte, rhs: Spark_Connect_DataType.Byte) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Short: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Short" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Short, rhs: Spark_Connect_DataType.Short) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Integer: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Integer" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Integer, rhs: Spark_Connect_DataType.Integer) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Long: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Long" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Long, rhs: Spark_Connect_DataType.Long) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.FloatMessage: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Float" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.FloatMessage, rhs: Spark_Connect_DataType.FloatMessage) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.DoubleMessage: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Double" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.DoubleMessage, rhs: Spark_Connect_DataType.DoubleMessage) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.StringMessage: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".String" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + 2: .same(proto: "collation"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + case 2: try { try decoder.decodeSingularStringField(value: &self.collation) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + if !self.collation.isEmpty { + try visitor.visitSingularStringField(value: self.collation, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.StringMessage, rhs: Spark_Connect_DataType.StringMessage) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.collation != rhs.collation {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Binary: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Binary" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Binary, rhs: Spark_Connect_DataType.Binary) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.NULL: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".NULL" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.NULL, rhs: Spark_Connect_DataType.NULL) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Timestamp: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Timestamp" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Timestamp, rhs: Spark_Connect_DataType.Timestamp) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Date: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Date" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Date, rhs: Spark_Connect_DataType.Date) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.TimestampNTZ: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".TimestampNTZ" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.TimestampNTZ, rhs: Spark_Connect_DataType.TimestampNTZ) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.CalendarInterval: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".CalendarInterval" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.CalendarInterval, rhs: Spark_Connect_DataType.CalendarInterval) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.YearMonthInterval: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".YearMonthInterval" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "start_field"), + 2: .standard(proto: "end_field"), + 3: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self._startField) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self._endField) }() + case 3: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._startField { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 1) + } }() + try { if let v = self._endField { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 2) + } }() + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.YearMonthInterval, rhs: Spark_Connect_DataType.YearMonthInterval) -> Bool { + if lhs._startField != rhs._startField {return false} + if lhs._endField != rhs._endField {return false} + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.DayTimeInterval: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".DayTimeInterval" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "start_field"), + 2: .standard(proto: "end_field"), + 3: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self._startField) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self._endField) }() + case 3: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._startField { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 1) + } }() + try { if let v = self._endField { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 2) + } }() + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.DayTimeInterval, rhs: Spark_Connect_DataType.DayTimeInterval) -> Bool { + if lhs._startField != rhs._startField {return false} + if lhs._endField != rhs._endField {return false} + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Char: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Char" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "length"), + 2: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.length) }() + case 2: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.length != 0 { + try visitor.visitSingularInt32Field(value: self.length, fieldNumber: 1) + } + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Char, rhs: Spark_Connect_DataType.Char) -> Bool { + if lhs.length != rhs.length {return false} + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.VarChar: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".VarChar" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "length"), + 2: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self.length) }() + case 2: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.length != 0 { + try visitor.visitSingularInt32Field(value: self.length, fieldNumber: 1) + } + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 2) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.VarChar, rhs: Spark_Connect_DataType.VarChar) -> Bool { + if lhs.length != rhs.length {return false} + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Decimal: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Decimal" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "scale"), + 2: .same(proto: "precision"), + 3: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularInt32Field(value: &self._scale) }() + case 2: try { try decoder.decodeSingularInt32Field(value: &self._precision) }() + case 3: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = self._scale { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 1) + } }() + try { if let v = self._precision { + try visitor.visitSingularInt32Field(value: v, fieldNumber: 2) + } }() + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 3) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Decimal, rhs: Spark_Connect_DataType.Decimal) -> Bool { + if lhs._scale != rhs._scale {return false} + if lhs._precision != rhs._precision {return false} + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.StructField: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".StructField" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "name"), + 2: .standard(proto: "data_type"), + 3: .same(proto: "nullable"), + 4: .same(proto: "metadata"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &self.name) }() + case 2: try { try decoder.decodeSingularMessageField(value: &self._dataType) }() + case 3: try { try decoder.decodeSingularBoolField(value: &self.nullable) }() + case 4: try { try decoder.decodeSingularStringField(value: &self._metadata) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and
+    // https://github.com/apple/swift-protobuf/issues/1182
+    if !self.name.isEmpty {
+      try visitor.visitSingularStringField(value: self.name, fieldNumber: 1)
+    }
+    try { if let v = self._dataType {
+      try visitor.visitSingularMessageField(value: v, fieldNumber: 2)
+    } }()
+    if self.nullable != false {
+      try visitor.visitSingularBoolField(value: self.nullable, fieldNumber: 3)
+    }
+    try { if let v = self._metadata {
+      try visitor.visitSingularStringField(value: v, fieldNumber: 4)
+    } }()
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_DataType.StructField, rhs: Spark_Connect_DataType.StructField) -> Bool {
+    if lhs.name != rhs.name {return false}
+    if lhs._dataType != rhs._dataType {return false}
+    if lhs.nullable != rhs.nullable {return false}
+    if lhs._metadata != rhs._metadata {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_DataType.Struct: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Struct"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .same(proto: "fields"),
+    2: .standard(proto: "type_variation_reference"),
+  ]
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    while let fieldNumber = try decoder.nextFieldNumber() {
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every case branch when no optimizations are
+      // enabled. https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeRepeatedMessageField(value: &self.fields) }()
+      case 2: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.fields.isEmpty {
+      try visitor.visitRepeatedMessageField(value: self.fields, fieldNumber: 1)
+    }
+    if self.typeVariationReference != 0 {
+      try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 2)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_DataType.Struct, rhs: Spark_Connect_DataType.Struct) -> Bool {
+    if lhs.fields != rhs.fields {return false}
+    if lhs.typeVariationReference != rhs.typeVariationReference {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_DataType.Array: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Array"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "element_type"),
+    2: .standard(proto: "contains_null"),
+    3: .standard(proto: "type_variation_reference"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _elementType: Spark_Connect_DataType? = nil
+    var _containsNull: Bool = false
+    var _typeVariationReference: UInt32 = 0
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _elementType = source._elementType
+      _containsNull = source._containsNull
+      _typeVariationReference = source._typeVariationReference
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._elementType) }()
+        case 2: try { try decoder.decodeSingularBoolField(value: &_storage._containsNull) }()
+        case 3: try { try decoder.decodeSingularUInt32Field(value: &_storage._typeVariationReference) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled. https://github.com/apple/swift-protobuf/issues/1034 and
+      // https://github.com/apple/swift-protobuf/issues/1182
+      try { if let v = _storage._elementType {
+        try visitor.visitSingularMessageField(value: v, fieldNumber: 1)
+      } }()
+      if _storage._containsNull != false {
+        try visitor.visitSingularBoolField(value: _storage._containsNull, fieldNumber: 2)
+      }
+      if _storage._typeVariationReference != 0 {
+        try visitor.visitSingularUInt32Field(value: _storage._typeVariationReference, fieldNumber: 3)
+      }
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_DataType.Array, rhs: Spark_Connect_DataType.Array) -> Bool {
+    if lhs._storage !== rhs._storage {
+      let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in
+        let _storage = _args.0
+        let rhs_storage = _args.1
+        if _storage._elementType != rhs_storage._elementType {return false}
+        if _storage._containsNull != rhs_storage._containsNull {return false}
+        if _storage._typeVariationReference != rhs_storage._typeVariationReference {return false}
+        return true
+      }
+      if !storagesAreEqual {return false}
+    }
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}
+
+extension Spark_Connect_DataType.Map: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding {
+  static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Map"
+  static let _protobuf_nameMap: SwiftProtobuf._NameMap = [
+    1: .standard(proto: "key_type"),
+    2: .standard(proto: "value_type"),
+    3: .standard(proto: "value_contains_null"),
+    4: .standard(proto: "type_variation_reference"),
+  ]
+
+  fileprivate class _StorageClass {
+    var _keyType: Spark_Connect_DataType? = nil
+    var _valueType: Spark_Connect_DataType? = nil
+    var _valueContainsNull: Bool = false
+    var _typeVariationReference: UInt32 = 0
+
+    #if swift(>=5.10)
+    // This property is used as the initial default value for new instances of the type.
+    // The type itself is protecting the reference to its storage via CoW semantics.
+    // This will force a copy to be made of this reference when the first mutation occurs;
+    // hence, it is safe to mark this as `nonisolated(unsafe)`.
+    static nonisolated(unsafe) let defaultInstance = _StorageClass()
+    #else
+    static let defaultInstance = _StorageClass()
+    #endif
+
+    private init() {}
+
+    init(copying source: _StorageClass) {
+      _keyType = source._keyType
+      _valueType = source._valueType
+      _valueContainsNull = source._valueContainsNull
+      _typeVariationReference = source._typeVariationReference
+    }
+  }
+
+  fileprivate mutating func _uniqueStorage() -> _StorageClass {
+    if !isKnownUniquelyReferenced(&_storage) {
+      _storage = _StorageClass(copying: _storage)
+    }
+    return _storage
+  }
+
+  mutating func decodeMessage<D: SwiftProtobuf.Decoder>(decoder: inout D) throws {
+    _ = _uniqueStorage()
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      while let fieldNumber = try decoder.nextFieldNumber() {
+        // The use of inline closures is to circumvent an issue where the compiler
+        // allocates stack space for every case branch when no optimizations are
+        // enabled. https://github.com/apple/swift-protobuf/issues/1034
+        switch fieldNumber {
+        case 1: try { try decoder.decodeSingularMessageField(value: &_storage._keyType) }()
+        case 2: try { try decoder.decodeSingularMessageField(value: &_storage._valueType) }()
+        case 3: try { try decoder.decodeSingularBoolField(value: &_storage._valueContainsNull) }()
+        case 4: try { try decoder.decodeSingularUInt32Field(value: &_storage._typeVariationReference) }()
+        default: break
+        }
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    try withExtendedLifetime(_storage) { (_storage: _StorageClass) in
+      // The use of inline closures is to circumvent an issue where the compiler
+      // allocates stack space for every if/case branch local when no optimizations
+      // are enabled.
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + try { if let v = _storage._keyType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 1) + } }() + try { if let v = _storage._valueType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 2) + } }() + if _storage._valueContainsNull != false { + try visitor.visitSingularBoolField(value: _storage._valueContainsNull, fieldNumber: 3) + } + if _storage._typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: _storage._typeVariationReference, fieldNumber: 4) + } + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Map, rhs: Spark_Connect_DataType.Map) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._keyType != rhs_storage._keyType {return false} + if _storage._valueType != rhs_storage._valueType {return false} + if _storage._valueContainsNull != rhs_storage._valueContainsNull {return false} + if _storage._typeVariationReference != rhs_storage._typeVariationReference {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Variant: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Variant" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "type_variation_reference"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularUInt32Field(value: &self.typeVariationReference) }() + default: break + } + } + } + + func traverse(visitor: inout V) throws { + if self.typeVariationReference != 0 { + try visitor.visitSingularUInt32Field(value: self.typeVariationReference, fieldNumber: 1) + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.Variant, rhs: Spark_Connect_DataType.Variant) -> Bool { + if lhs.typeVariationReference != rhs.typeVariationReference {return false} + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.UDT: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".UDT" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .same(proto: "type"), + 2: .standard(proto: "jvm_class"), + 3: .standard(proto: "python_class"), + 4: .standard(proto: "serialized_python_class"), + 5: .standard(proto: "sql_type"), + ] + + fileprivate class _StorageClass { + var _type: String = String() + var _jvmClass: String? = nil + var _pythonClass: String? = nil + var _serializedPythonClass: String? = nil + var _sqlType: Spark_Connect_DataType? 
= nil + + #if swift(>=5.10) + // This property is used as the initial default value for new instances of the type. + // The type itself is protecting the reference to its storage via CoW semantics. + // This will force a copy to be made of this reference when the first mutation occurs; + // hence, it is safe to mark this as `nonisolated(unsafe)`. + static nonisolated(unsafe) let defaultInstance = _StorageClass() + #else + static let defaultInstance = _StorageClass() + #endif + + private init() {} + + init(copying source: _StorageClass) { + _type = source._type + _jvmClass = source._jvmClass + _pythonClass = source._pythonClass + _serializedPythonClass = source._serializedPythonClass + _sqlType = source._sqlType + } + } + + fileprivate mutating func _uniqueStorage() -> _StorageClass { + if !isKnownUniquelyReferenced(&_storage) { + _storage = _StorageClass(copying: _storage) + } + return _storage + } + + mutating func decodeMessage(decoder: inout D) throws { + _ = _uniqueStorage() + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. https://github.com/apple/swift-protobuf/issues/1034 + switch fieldNumber { + case 1: try { try decoder.decodeSingularStringField(value: &_storage._type) }() + case 2: try { try decoder.decodeSingularStringField(value: &_storage._jvmClass) }() + case 3: try { try decoder.decodeSingularStringField(value: &_storage._pythonClass) }() + case 4: try { try decoder.decodeSingularStringField(value: &_storage._serializedPythonClass) }() + case 5: try { try decoder.decodeSingularMessageField(value: &_storage._sqlType) }() + default: break + } + } + } + } + + func traverse(visitor: inout V) throws { + try withExtendedLifetime(_storage) { (_storage: _StorageClass) in + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every if/case branch local when no optimizations + // are enabled. 
https://github.com/apple/swift-protobuf/issues/1034 and + // https://github.com/apple/swift-protobuf/issues/1182 + if !_storage._type.isEmpty { + try visitor.visitSingularStringField(value: _storage._type, fieldNumber: 1) + } + try { if let v = _storage._jvmClass { + try visitor.visitSingularStringField(value: v, fieldNumber: 2) + } }() + try { if let v = _storage._pythonClass { + try visitor.visitSingularStringField(value: v, fieldNumber: 3) + } }() + try { if let v = _storage._serializedPythonClass { + try visitor.visitSingularStringField(value: v, fieldNumber: 4) + } }() + try { if let v = _storage._sqlType { + try visitor.visitSingularMessageField(value: v, fieldNumber: 5) + } }() + } + try unknownFields.traverse(visitor: &visitor) + } + + static func ==(lhs: Spark_Connect_DataType.UDT, rhs: Spark_Connect_DataType.UDT) -> Bool { + if lhs._storage !== rhs._storage { + let storagesAreEqual: Bool = withExtendedLifetime((lhs._storage, rhs._storage)) { (_args: (_StorageClass, _StorageClass)) in + let _storage = _args.0 + let rhs_storage = _args.1 + if _storage._type != rhs_storage._type {return false} + if _storage._jvmClass != rhs_storage._jvmClass {return false} + if _storage._pythonClass != rhs_storage._pythonClass {return false} + if _storage._serializedPythonClass != rhs_storage._serializedPythonClass {return false} + if _storage._sqlType != rhs_storage._sqlType {return false} + return true + } + if !storagesAreEqual {return false} + } + if lhs.unknownFields != rhs.unknownFields {return false} + return true + } +} + +extension Spark_Connect_DataType.Unparsed: SwiftProtobuf.Message, SwiftProtobuf._MessageImplementationBase, SwiftProtobuf._ProtoNameProviding { + static let protoMessageName: String = Spark_Connect_DataType.protoMessageName + ".Unparsed" + static let _protobuf_nameMap: SwiftProtobuf._NameMap = [ + 1: .standard(proto: "data_type_string"), + ] + + mutating func decodeMessage(decoder: inout D) throws { + while let fieldNumber = try decoder.nextFieldNumber() { + // The use of inline closures is to circumvent an issue where the compiler + // allocates stack space for every case branch when no optimizations are + // enabled. 
https://github.com/apple/swift-protobuf/issues/1034
+      switch fieldNumber {
+      case 1: try { try decoder.decodeSingularStringField(value: &self.dataTypeString) }()
+      default: break
+      }
+    }
+  }
+
+  func traverse<V: SwiftProtobuf.Visitor>(visitor: inout V) throws {
+    if !self.dataTypeString.isEmpty {
+      try visitor.visitSingularStringField(value: self.dataTypeString, fieldNumber: 1)
+    }
+    try unknownFields.traverse(visitor: &visitor)
+  }
+
+  static func ==(lhs: Spark_Connect_DataType.Unparsed, rhs: Spark_Connect_DataType.Unparsed) -> Bool {
+    if lhs.dataTypeString != rhs.dataTypeString {return false}
+    if lhs.unknownFields != rhs.unknownFields {return false}
+    return true
+  }
+}

From b54204ba2c6075011d94f79f470a99ef34b4fb94 Mon Sep 17 00:00:00 2001
From: Dongjoon Hyun
Date: Mon, 10 Mar 2025 22:26:07 -0700
Subject: [PATCH 2/2] Remove empty grpc files

---
 Sources/SparkConnect/catalog.grpc.swift       | 26 -------------------
 Sources/SparkConnect/commands.grpc.swift      | 26 -------------------
 Sources/SparkConnect/common.grpc.swift        | 26 -------------------
 .../SparkConnect/example_plugins.grpc.swift   | 26 -------------------
 Sources/SparkConnect/expressions.grpc.swift   | 26 -------------------
 Sources/SparkConnect/ml.grpc.swift            | 26 -------------------
 Sources/SparkConnect/ml_common.grpc.swift     | 26 -------------------
 Sources/SparkConnect/relations.grpc.swift     | 26 -------------------
 Sources/SparkConnect/types.grpc.swift         | 26 -------------------
 9 files changed, 234 deletions(-)
 delete mode 100644 Sources/SparkConnect/catalog.grpc.swift
 delete mode 100644 Sources/SparkConnect/commands.grpc.swift
 delete mode 100644 Sources/SparkConnect/common.grpc.swift
 delete mode 100644 Sources/SparkConnect/example_plugins.grpc.swift
 delete mode 100644 Sources/SparkConnect/expressions.grpc.swift
 delete mode 100644 Sources/SparkConnect/ml.grpc.swift
 delete mode 100644 Sources/SparkConnect/ml_common.grpc.swift
 delete mode 100644 Sources/SparkConnect/relations.grpc.swift
 delete mode 100644 Sources/SparkConnect/types.grpc.swift

diff --git a/Sources/SparkConnect/catalog.grpc.swift b/Sources/SparkConnect/catalog.grpc.swift
deleted file mode 100644
index 054efa7..0000000
--- a/Sources/SparkConnect/catalog.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/catalog.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/commands.grpc.swift b/Sources/SparkConnect/commands.grpc.swift
deleted file mode 100644
index 87fa382..0000000
--- a/Sources/SparkConnect/commands.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/commands.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/common.grpc.swift b/Sources/SparkConnect/common.grpc.swift
deleted file mode 100644
index 27958c5..0000000
--- a/Sources/SparkConnect/common.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/common.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/example_plugins.grpc.swift b/Sources/SparkConnect/example_plugins.grpc.swift
deleted file mode 100644
index ba5648a..0000000
--- a/Sources/SparkConnect/example_plugins.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/example_plugins.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/expressions.grpc.swift b/Sources/SparkConnect/expressions.grpc.swift
deleted file mode 100644
index c7af4ea..0000000
--- a/Sources/SparkConnect/expressions.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/expressions.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/ml.grpc.swift b/Sources/SparkConnect/ml.grpc.swift
deleted file mode 100644
index 75c8e0d..0000000
--- a/Sources/SparkConnect/ml.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/ml.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/ml_common.grpc.swift b/Sources/SparkConnect/ml_common.grpc.swift
deleted file mode 100644
index 9b19676..0000000
--- a/Sources/SparkConnect/ml_common.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/ml_common.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/relations.grpc.swift b/Sources/SparkConnect/relations.grpc.swift
deleted file mode 100644
index 57a8084..0000000
--- a/Sources/SparkConnect/relations.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/relations.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
diff --git a/Sources/SparkConnect/types.grpc.swift b/Sources/SparkConnect/types.grpc.swift
deleted file mode 100644
index 50ce28d..0000000
--- a/Sources/SparkConnect/types.grpc.swift
+++ /dev/null
@@ -1,26 +0,0 @@
-//
-// Licensed to the Apache Software Foundation (ASF) under one or more
-// contributor license agreements. See the NOTICE file distributed with
-// this work for additional information regarding copyright ownership.
-// The ASF licenses this file to You under the Apache License, Version 2.0
-// (the "License"); you may not use this file except in compliance with
-// the License. You may obtain a copy of the License at
-//
-// http://www.apache.org/licenses/LICENSE-2.0
-//
-// Unless required by applicable law or agreed to in writing, software
-// distributed under the License is distributed on an "AS IS" BASIS,
-// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-// See the License for the specific language governing permissions and
-// limitations under the License.
-
-// DO NOT EDIT.
-// swift-format-ignore-file
-//
-// Generated by the gRPC Swift generator plugin for the protocol buffer compiler.
-// Source: spark/connect/types.proto
-//
-// For information on using the generated types, please see the documentation:
-// https://github.com/grpc/grpc-swift
-
-// This file contained no services.
\ No newline at end of file
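
A brief orientation for reviewers of the generated code above. Messages whose fields recursively contain `Spark_Connect_DataType` (`Array`, `Map`, `UDT`) are emitted with a heap-allocated `_StorageClass` behind copy-on-write semantics, since a Swift struct cannot hold a value of its own type directly; `_uniqueStorage()` copies the reference only when the storage is shared, and `==` short-circuits when both sides reference the same storage object. The sketch below shows how code in the same module might round-trip one of these types. It is a minimal, hypothetical example, not part of the patch: it assumes it is compiled alongside the generated (internal) types, and it assumes the `DataType` oneof in spark/connect/types.proto exposes a `long` case whose SwiftProtobuf accessor is named `long` — a detail not confirmed by this diff.

import Foundation
import SwiftProtobuf

// Hypothetical: build struct<id: bigint>, serialize it, and decode it back.
func roundTripStructType() throws {
  var longType = Spark_Connect_DataType.Long()
  longType.typeVariationReference = 0  // proto3 default, so `traverse` skips it on the wire

  var idType = Spark_Connect_DataType()
  idType.long = longType  // assumed oneof case accessor

  var idField = Spark_Connect_DataType.StructField()
  idField.name = "id"
  idField.dataType = idType
  idField.nullable = true

  var structType = Spark_Connect_DataType.Struct()
  structType.fields = [idField]

  // `serializedData()` drives the generated `traverse(visitor:)`;
  // `init(serializedData:)` drives the generated `decodeMessage(decoder:)`.
  let bytes = try structType.serializedData()
  let decoded = try Spark_Connect_DataType.Struct(serializedData: bytes)

  // `==` compares storage identity first, then falls back to field-by-field comparison.
  precondition(decoded == structType)
}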